"""Cuboid-route counting: find the least cuboid size M such that the number of
cuboids (up to M x M x M) with an integer shortest surface path exceeds `limit`."""
from math import sqrt


def solution(limit: int = 1_000_000) -> int:
    """Return the smallest max_cuboid_size whose cuboid count exceeds `limit`."""
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            # The shortest path over the surface is an integer exactly when
            # (a + b)^2 + c^2 is a perfect square, with c = max_cuboid_size.
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(f"{solution() = }")
"""Utility to remove duplicated initializer tensors from an ONNX model."""
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    """Compare two TensorProtos for equality while ignoring their names."""
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    """Rewrite every input called `name` on the node (and on If/Loop subgraphs) to `new_name`."""
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        # Remove the duplicated initializer and point its consumers at the kept one.
        model_without_ext.graph.initializer.remove(inits[i])
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """
    Find byte-identical initializers in the model at `onnx_file_path`, keep one copy
    of each, and save the result as `optimized_<basename>`. Returns the new file path.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # FLOAT
                    mem_size *= 4
                elif dtype == 6:  # INT32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # INT64 or DOUBLE
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = "optimized_" + model_file_name
    new_model_path = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model_path)
    return new_model_path
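A minimal usage sketch of the utility above; the file name is illustrative, not taken from the source. Deduplicating shared weights (for example tied embeddings duplicated during export) can shrink exported encoder-decoder models noticeably.

optimized_path = remove_dup_initializers("model.onnx")  # hypothetical path
print("optimized model written to:", optimized_path)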
"""Abstract base class for CLI subcommands. Identifier names below are
reconstructed from context; the originals were obfuscated in this copy."""
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
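A minimal sketch of how a concrete subcommand would plug in; the command name and behavior here are illustrative only, and `parser` is assumed to be an argparse sub-parsers action:

class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        hello_parser = parser.add_parser("hello")
        hello_parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("hello")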
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
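For readers unfamiliar with the pattern above, this is roughly what `_LazyModule` buys; a simplified sketch, not the actual transformers implementation. The module object registered in `sys.modules` defers the heavy submodule imports until an attribute is first accessed:

import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Import the owning submodule on first access, then delegate.
        for submodule, exported_names in self._import_structure.items():
            if attr in exported_names:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")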
"""A probabilistic skip list: expected O(log n) search, insert, and delete."""
from __future__ import annotations

from random import random
from typing import Generic, TypeVar

KT = TypeVar("KT")
VT = TypeVar("VT")


class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None) -> None:
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """Number of forward references this node carries."""
        return len(self.forward)


class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16) -> None:
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)
        if len(items) == 0:
            return f"SkipList(level={self.level})"
        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))
        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards[: node.level] = node.forward
        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Draw a level for a new node: geometric with parameter p, capped at max_level."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        """Return the node holding `key` (or None) plus the per-level predecessors to update."""
        update_vector = []
        node = self.head
        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)
        update_vector.reverse()  # Note that we were inserting values in reverse order.
        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT) -> None:
        node, update_vector = self._locate_node(key)
        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT) -> None:
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level
            new_node = Node(key, value)
            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])
                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: KT) -> VT | None:
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None


def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()
    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")
    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key():
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)
    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests():
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()


def main():
    """
    >>> pytests()
    """
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")
    skip_list.delete(4)
    print(skip_list)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
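A minimal usage sketch of the SkipList above; keys only need to be mutually comparable:

sl = SkipList()
sl.insert("b", 2)
sl.insert("a", 1)
print(sl.find("a"))  # 1
sl.delete("a")
print(sl.find("a"))  # None
print(list(sl))      # ['b'], since iteration yields keys in sorted order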
"""Image processor that rounds images down to a size divisor and rescales to [0, 1].
The class name below is reconstructed from context (it matches the GLPN image
processor in transformers); the original name was obfuscated in this copy."""
from typing import List, Optional, Union

import numpy as np
import PIL.Image

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    PILImageResampling,
    get_image_size,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging

logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", List["PIL.Image.Image"], np.ndarray, List[np.ndarray]],
        do_resize: Optional[bool] = None,
        do_rescale: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]
        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
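A quick shape-check sketch under the reconstruction above, reusing the `np` import from the file; the random image is illustrative:

processor = GLPNImageProcessor()
image = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)
batch = processor.preprocess(image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 480, 640): 480 and 640 are already multiples of 32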
"""RoBERTa-PreLayerNorm model configuration."""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}


class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
"""Tests for the TensorFlow LED model. Identifier names are reconstructed from
context; the originals were obfuscated in this copy."""
from __future__ import annotations

import unittest

from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_tf_available():
    import tensorflow as tf

    from transformers import TFLEDForConditionalGeneration, TFLEDModel


@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }


@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)

    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: head masking is not yet implemented for LED
        pass


def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4


@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1_024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1_024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
"""Flax building blocks for a UNet: nearest-neighbor upsampling, strided
downsampling, and a time-conditioned residual block (as in diffusers)."""
import flax.linen as nn
import jax
import jax.numpy as jnp


class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        # Double the spatial resolution with nearest-neighbor resizing, then convolve.
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            # 1x1 convolution to match channel counts on the residual path.
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        # Project the time embedding and broadcast-add it over the spatial dims.
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
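A small shape-check sketch for the upsampling module above, assuming flax and jax are installed; inputs use the NHWC layout these modules expect:

rng = jax.random.PRNGKey(0)
x = jnp.ones((1, 8, 8, 4))
upsample = FlaxUpsample2D(out_channels=4)
params = upsample.init(rng, x)
print(upsample.apply(params, x).shape)  # (1, 16, 16, 4)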
"""Fast tokenizer for GPT-NeoX (backed by the `tokenizers` library)."""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging

if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2_048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        # Rebuild the pre-tokenizer if the stored add_prefix_space setting differs.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Concatenate the conversation turns, appending EOS after each one."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
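A brief usage sketch; this pulls the tokenizer file referenced in the map above from the Hub:

tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
ids = tokenizer("Hello world").input_ids
print(tokenizer.decode(ids))  # Hello world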
"""Tests for the accelerate example scripts. Identifier names are reconstructed
from context; the originals were obfuscated in this copy."""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock

import torch

from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config

# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
    "cross_validation.py",
    "gradient_accumulation.py",
    "local_sgd.py",
    "multi_process_metrics.py",
    "memory.py",
    "automatic_gradient_accumulation.py",
    "fsdp_with_peak_mem_tracking.py",
    "deepspeed_with_config_support.py",
    "megatron_lm_gpt_pretraining.py",
]


class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)


@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clean_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
"""Gamma function via numerical integration: gamma(z) = integral from 0 to
infinity of x^(z-1) * e^(-x) dx."""
import math

from numpy import inf
from scipy.integrate import quad


def gamma(num: float) -> float:
    """Return gamma(num) for num > 0, computed with scipy's `quad`."""
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
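A quick sanity check: for positive integers n, gamma(n) equals (n - 1)!, so the numerical integration should reproduce small factorials closely:

assert abs(gamma(5) - 24.0) < 1e-6  # 4! = 24
assert abs(gamma(1) - 1.0) < 1e-6   # 0! = 1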
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)

try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
    from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
    from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
    from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
    from .text_encoder import MultilingualCLIP
"""Cross-framework (PyTorch <-> TensorFlow) tests for the Auto model classes.
Identifier names are reconstructed; digit-stripped names such as `GPTa`/`Ta`
in the obfuscated copy correspond to `GPT2`/`T5`."""
from __future__ import annotations

import unittest

from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow

if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST

if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )


@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)

_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_timm_backbone': ['TimmBackboneConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_timm_backbone'] = ['TimmBackbone']
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 47
| 0
|
'''simple docstring'''
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_a: Vector, vector_b: Vector) -> VectorOut:
    """Compute the Euclidean distance between two vectors using NumPy."""
    return np.sqrt(np.sum((np.asarray(vector_a) - np.asarray(vector_b)) ** 2))


def euclidean_distance_no_np(vector_a: Vector, vector_b: Vector) -> VectorOut:
    """Compute the Euclidean distance between two vectors in pure Python."""
    return sum((va - vb) ** 2 for va, vb in zip(vector_a, vector_b)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        """Benchmark both implementations with timeit."""
        from timeit import timeit

        print("Without Numpy")
        print(timeit("euclidean_distance_no_np([1, 2, 3], [4, 5, 6])", number=10_000, globals=globals()))
        print("With Numpy")
        print(timeit("euclidean_distance([1, 2, 3], [4, 5, 6])", number=10_000, globals=globals()))

    benchmark()
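# Quick sanity check (editor's addition, not part of the original script): both
# implementations should agree on a 3-4-5 right triangle.
if __name__ == "__main__":
    assert abs(euclidean_distance([0, 0], [3, 4]) - 5.0) < 1e-9
    assert abs(euclidean_distance_no_np([0, 0], [3, 4]) - 5.0) < 1e-9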
| 721
|
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')
MODEL_MODES = {
'base': AutoModel,
'sequence-classification': AutoModelForSequenceClassification,
'question-answering': AutoModelForQuestionAnswering,
'pretraining': AutoModelForPreTraining,
'token-classification': AutoModelForTokenClassification,
'language-modeling': AutoModelWithLMHead,
    'summarization': AutoModelForSeq2SeqLM,
    'translation': AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(self, hparams: argparse.Namespace, num_labels=None, mode="base", config=None, tokenizer=None, model=None, **config_kwargs):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler
    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named parameters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]
    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)
    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)
    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path", default=None, type=str, required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name", default=None, type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir", default=str(Path(root_dir).parent / "test_run" / "cache"), type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop", type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop", type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout", type=float, help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout", type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler", default="linear", choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar, type=str, help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    # We initialize the retriever only on the master worker with RAY; in new
    # pytorch-lightning versions the custom accelerators were removed.
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # check whether any parameter was left without a gradient after the backward pass
    def on_after_backward(self, trainer, pl_module):
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)
class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    parser.add_argument(
        "--output_dir", default=str(Path(root_dir).parent / "test_run" / "model_checkpoints"), type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16", action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level", type=str, default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps", dest="accumulate_grad_batches", type=int, default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir", default=str(Path(root_dir).parent / "test_run" / "dummy-train-data"), type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(model: BaseTransformer, args: argparse.Namespace, early_stopping_callback=None, logger=True, extra_callbacks=[], checkpoint_callback=None, logging_callback=None, **extra_train_kwargs):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}
    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfully executed!")
    return trainer
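# --- Illustrative wiring sketch (editor's addition, not part of the original
# file). `ExampleModule` is hypothetical and only demonstrates the required
# override; it does no real data loading.
if __name__ == "__main__":

    class ExampleModule(BaseTransformer):
        def __init__(self, hparams):
            super().__init__(hparams, num_labels=2, mode="sequence-classification")

        def get_dataloader(self, type_path, batch_size, shuffle=False):
            raise NotImplementedError("plug a torch DataLoader in here")

    sketch_parser = argparse.ArgumentParser()
    add_generic_args(sketch_parser, os.getcwd())
    BaseTransformer.add_model_specific_args(sketch_parser, os.getcwd())
    # generic_train(ExampleModule(sketch_parser.parse_args()), sketch_parser.parse_args())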
| 47
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
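# Illustrative direct-use sketch (editor's addition, not part of the test file):
# the constructor arguments mirror the defaults exercised above.
if __name__ == "__main__":
    from transformers import LevitImageProcessor

    processor = LevitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
    image = np.random.randint(0, 256, (30, 40, 3), dtype=np.uint8)
    pixels = processor(image, return_tensors="pt").pixel_values
    assert pixels.shape == (1, 3, 18, 18)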
| 700
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, squeeze_factor=2, max_position_embeddings=512, position_buckets=256, share_att_key=True, relative_attention=True, pos_att_type=("p2c", "c2p"), norm_rel_ebd="layer_norm", hidden_act="gelu_python", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, initializer_range=0.02, layer_norm_eps=1e-7, feature_layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512), conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
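# Illustrative check (editor's addition, not part of the original module): with
# the default conv_stride (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) the feature
# encoder downsamples audio by 5 * 2**6 = 320 samples per logit frame.
#
#     from transformers import SEWDConfig
#     assert SEWDConfig().inputs_to_logits_ratio == 320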
| 47
| 0
|
'''simple docstring'''
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, oder?''',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
'''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
'''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
'''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
}
    pair = f'''{src_lang}-{tgt_lang}'''
    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 701
|
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print("No of Comparisons for 100 elements selected from a standard normal distribution is :")
print(z)
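# Deterministic sanity check (editor's addition, not part of the original
# script): in-place sorting a small list must leave it ordered regardless of
# which random pivots were drawn.
if __name__ == "__main__":
    sample = [3, 1, 4, 1, 5, 9, 2, 6]
    _in_place_quick_sort(sample, 0, len(sample) - 1)
    assert sample == sorted([3, 1, 4, 1, 5, 9, 2, 6])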
| 47
| 0
|
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 702
|
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
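# Illustrative usage sketch (editor's addition, not part of the original
# __init__); the checkpoint id below is the commonly used public one and is
# assumed here.
#
#     from diffusers import VQDiffusionPipeline
#     pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#     image = pipeline("teddy bear playing in the pool").images[0]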
| 47
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
UpperCAmelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
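# Illustrative export sketch (editor's addition, not part of the original
# __init__); it follows the documented legacy `transformers.onnx` flow, with
# the checkpoint id assumed for the example.
#
#     from pathlib import Path
#     from transformers import AutoModel, AutoTokenizer
#     from transformers.onnx import FeaturesManager, export
#
#     model = AutoModel.from_pretrained("bert-base-uncased")
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     kind, config_cls = FeaturesManager.check_supported_model_or_raise(model, feature="default")
#     onnx_config = config_cls(model.config)
#     export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("model.onnx"))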
| 703
|
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 47
| 0
|
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
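# Quick check (editor's addition): Project Euler 114's worked example states
# that a row of length 7 admits exactly 17 arrangements, which this
# implementation reproduces.
if __name__ == "__main__":
    assert solution(7) == 17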
| 704
|
'''simple docstring'''
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1_000) -> int:
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
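# Quick check (editor's addition): the first Fibonacci number with three digits
# is F(12) = 144, so solution(3) == 12, matching Project Euler 25's statement.
if __name__ == "__main__":
    assert solution(3) == 12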
| 47
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/nllb-large-en-ro': 1_0_2_4,
'facebook/nllb-200-distilled-600M': 1_0_2_4,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs)

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "eng_Latn", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "fra_Latn", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting.
        - In legacy mode: no prefix and suffix=[eos, src_lang_code].
        - In default mode: prefix=[src_lang_code], suffix=[eos].
        """
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting.
        - In legacy mode: no prefix and suffix=[eos, tgt_lang_code].
        - In default mode: prefix=[tgt_lang_code], suffix=[eos].
        """
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
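# Illustrative usage sketch (editor's addition, not part of the tokenizer
# module); the checkpoint id is the standard public one and is assumed here.
#
#     tokenizer = NllbTokenizerFast.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
#     batch = tokenizer("Machine learning is great, isn't it?", return_tensors="pt")
#     # the target language is selected at generation time via forced_bos_token_id:
#     # model.generate(**batch, forced_bos_token_id=tokenizer.lang_code_to_id["fra_Latn"])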
| 705
|
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments(parser):
    """Add arguments to parser for functions defined in quant_trainer."""
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )
def set_default_quantizers(args):
    """Set default quantizers before creating the model."""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
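# Illustrative workflow sketch (editor's addition, not part of the original
# helpers). The argument values and model are assumptions; the call order is
# the one implied by the functions in this file: set defaults before model
# creation, calibrate on representative data, then configure for fine-tuning.
#
#     import argparse, transformers
#     parser = argparse.ArgumentParser()
#     add_arguments(parser)
#     args = parser.parse_args(["--calibrator", "percentile", "--percentile", "99.99"])
#     set_default_quantizers(args)  # must run before the model is created
#     model = transformers.AutoModel.from_pretrained("bert-base-uncased")
#     enable_calibration(model)
#     # ... run a few batches of representative data through `model` ...
#     finish_calibration(model, args)
#     configure_model(model, args)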
def configure_model(model, args, calib=False, eval=False):
    """Function called before the training loop."""
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
        if args.clip_gelu:
            clip_gelu(model, args.clip_gelu)
        # if args.local_rank in [-1, 0] and not calib:
        print_quant_summary(model)
def enable_calibration(model):
    """Enable calibration of all *_input_quantizer modules in model."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")
def finish_calibration(model, args):
    """Disable calibration and load amax for all *_input_quantizer modules in model."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)
def fuse_qkv(model, args):
    """Adjust quantization ranges to match an implementation where the QKV projections are a single GEMM.

    Force the weight and output scale factors to match by taking the max of (Q, K, V).
    """

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)
def clip_gelu(model, maxval):
    """Clip activations generated by GELU to maxval when quantized.

    Implemented by adjusting the amax of the following input_quantizer.
    """
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ) and mod._weight_quantizer.axis is not None:
_snake_case : Dict = mod.weight.shape[0]
_snake_case : Optional[int] = mod._weight_quantizer._amax.detach()
_snake_case : Optional[int] = torch.ones(lowerCAmelCase_ , dtype=amax.dtype , device=amax.device ) * amax
print(f'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' )
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ):
            if not hasattr(mod._weight_quantizer , '''_amax''' ):
                print(f'''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''' )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
_snake_case : int = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
_snake_case : Dict = set(range(len(mod.weight.size() ) ) ) - axis_set
_snake_case : Optional[int] = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowerCAmelCase_ , keepdims=lowerCAmelCase_ ).detach()
logger.info(f'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' )
            mod._weight_quantizer._amax = amax
def _a ( lowerCAmelCase_ , lowerCAmelCase_=25 , lowerCAmelCase_=180 , lowerCAmelCase_=None ):
"""simple docstring"""
if ignore is None:
_snake_case : Dict = []
elif not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case : Optional[int] = [ignore]
_snake_case : str = 0
for name, mod in model.named_modules():
if not hasattr(lowerCAmelCase_ , '''weight''' ):
continue
_snake_case : Optional[int] = max(lowerCAmelCase_ , len(lowerCAmelCase_ ) )
for name, mod in model.named_modules():
_snake_case : Optional[Any] = getattr(lowerCAmelCase_ , '''_input_quantizer''' , lowerCAmelCase_ )
_snake_case : Tuple = getattr(lowerCAmelCase_ , '''_weight_quantizer''' , lowerCAmelCase_ )
if not hasattr(lowerCAmelCase_ , '''weight''' ):
continue
if type(lowerCAmelCase_ ) in ignore:
continue
if [True for s in ignore if type(lowerCAmelCase_ ) is str and s in name]:
continue
_snake_case : Optional[int] = f'''Act:{input_q.extra_repr()}'''
_snake_case : Any = f'''Wgt:{weight_q.extra_repr()}'''
_snake_case : Optional[int] = f'''{name:{name_width}} {act_str} {wgt_str}'''
if len(lowerCAmelCase_ ) <= line_width:
logger.info(lowerCAmelCase_ )
else:
logger.info(f'''{name:{name_width}} {act_str}''' )
logger.info(f'''{" ":{name_width}} {wgt_str}''' )
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : str = 0
for name, mod in model.named_modules():
if isinstance(lowerCAmelCase_ , pytorch_quantization.nn.TensorQuantizer ):
print(f'''{name:80} {mod}''' )
count += 1
print(f'''{count} TensorQuantizers found in model''' )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Optional[Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if quantizer_mod is not None:
assert hasattr(lowerCAmelCase_ , lowerCAmelCase_ )
setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
logger.warning(f'''{name} has no {quantizer}''' )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="both" , **lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Optional[Any] = f'''Warning: changing {which} quantizers of {name:{qname_width}}'''
for k, v in kwargs.items():
s += f''' {k}={v}'''
if which in ["input", "both"]:
set_quantizer(lowerCAmelCase_ , lowerCAmelCase_ , '''_input_quantizer''' , lowerCAmelCase_ , lowerCAmelCase_ )
if which in ["weight", "both"]:
set_quantizer(lowerCAmelCase_ , lowerCAmelCase_ , '''_weight_quantizer''' , lowerCAmelCase_ , lowerCAmelCase_ )
logger.info(lowerCAmelCase_ )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ):
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(lowerCAmelCase_ , '''_input_quantizer''' ) or hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ):
for n in names:
if re.search(lowerCAmelCase_ , lowerCAmelCase_ ):
set_quantizers(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
elif name.endswith('''_quantizer''' ):
for n in names:
if re.search(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case : Any = f'''Warning: changing {name:{name_width}}'''
for k, v in kwargs.items():
s += f''' {k}={v}'''
setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
logger.info(lowerCAmelCase_ )
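# Illustrative addendum (not part of the original script): a self-contained
# sketch of the per-channel amax recomputation performed by the weight
# recalibration helper above, assuming pytorch_quantization's reduce_amax is
# an abs-max reduction over the non-quantization axes.
import torch
_w = torch.randn(4, 8)  # e.g. a Linear weight, quantized per output channel (axis 0)
_amax = _w.abs().amax(dim=1, keepdim=True)  # one amax per output channel
print(_amax.shape)  # torch.Size([4, 1])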
| 47
| 0
|
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class lowerCamelCase (a__ ):
def __init__( self , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = False , lowercase__ = False , lowercase__ = None , **lowercase__ , ) -> Any:
"""simple docstring"""
_snake_case : List[Any] = path_or_paths
_snake_case : Union[str, Any] = split if split or isinstance(lowercase__ , lowercase__ ) else '''train'''
_snake_case : Any = features
_snake_case : Union[str, Any] = cache_dir
_snake_case : int = keep_in_memory
_snake_case : int = streaming
_snake_case : Dict = num_proc
_snake_case : str = kwargs
@abstractmethod
def UpperCAmelCase_ ( self ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
"""simple docstring"""
pass
class lowerCamelCase (a__ ):
def __init__( self , lowercase__ = None , lowercase__ = None , lowercase__ = False , lowercase__ = False , lowercase__ = None , **lowercase__ , ) -> List[Any]:
"""simple docstring"""
_snake_case : Any = features
_snake_case : int = cache_dir
_snake_case : Any = keep_in_memory
_snake_case : str = streaming
_snake_case : List[Any] = num_proc
_snake_case : Dict = kwargs
@abstractmethod
def UpperCAmelCase_ ( self ) -> Union[Dataset, IterableDataset]:
"""simple docstring"""
pass
| 706
|
'''simple docstring'''
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort ``sequence`` in place over the index range [start, end]."""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[mid], sequence[end] = sequence[end], sequence[mid]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
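    # Illustrative usage (added for clarity): slowsort sorts the list in place.
    data = [5, 2, 4, 1, 3]
    slowsort(data)
    print(data)  # [1, 2, 3, 4, 5]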
| 47
| 0
|
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
UpperCAmelCase : Tuple = logging.get_logger(__name__)
UpperCAmelCase : Union[str, Any] = {name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS}
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(f'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.''' )
if tokenizer_name is None:
_snake_case : Dict = TOKENIZER_CLASSES
else:
_snake_case : Any = {tokenizer_name: getattr(lowerCAmelCase_ , tokenizer_name + '''Fast''' )}
logger.info(f'''Loading tokenizer classes: {tokenizer_names}''' )
for tokenizer_name in tokenizer_names:
_snake_case : Any = TOKENIZER_CLASSES[tokenizer_name]
_snake_case : Optional[int] = True
if checkpoint_name is None:
_snake_case : List[Any] = list(tokenizer_class.max_model_input_sizes.keys() )
else:
_snake_case : Union[str, Any] = [checkpoint_name]
        logger.info(f'''For tokenizer {tokenizer_class.__name__} loading checkpoints: {checkpoint_names}''' )
for checkpoint in checkpoint_names:
            logger.info(f'''Loading {tokenizer_class.__name__} {checkpoint}''' )
# Load tokenizer
_snake_case : Dict = tokenizer_class.from_pretrained(lowerCAmelCase_ , force_download=lowerCAmelCase_ )
# Save fast tokenizer
logger.info(f'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''' )
# For organization names we create sub-directories
if "/" in checkpoint:
_snake_case : Dict = checkpoint.split('''/''' )
_snake_case : Any = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
elif add_prefix:
_snake_case : Any = checkpoint
_snake_case : List[str] = dump_path
else:
_snake_case : Union[str, Any] = None
_snake_case : Any = dump_path
logger.info(f'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
_snake_case : str = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
_snake_case : List[str] = file_path.split(lowerCAmelCase_ )[-1][0]
if next_char == "/":
_snake_case : Optional[int] = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case : Optional[int] = None
logger.info(f'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
_snake_case : List[str] = tokenizer.save_pretrained(
lowerCAmelCase_ , legacy_format=lowerCAmelCase_ , filename_prefix=lowerCAmelCase_ )
logger.info(f'''=> File names {file_names}''' )
for file_name in file_names:
if not file_name.endswith('''tokenizer.json''' ):
os.remove(lowerCAmelCase_ )
logger.info(f'''=> removing {file_name}''' )
if __name__ == "__main__":
UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.'
)
parser.add_argument(
'--tokenizer_name',
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
'download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--checkpoint_name',
default=None,
type=str,
help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.',
)
parser.add_argument(
'--force_download',
action='store_true',
help='Re-download checkpoints.',
)
UpperCAmelCase : Tuple = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
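# Illustrative invocation (added for clarity; the module filename and paths are
# assumptions for this sketch):
# python convert_slow_tokenizers_checkpoints_to_fast.py --tokenizer_name BertTokenizer \
#     --checkpoint_name bert-base-uncased --dump_path ./fast_tokenizers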
| 707
|
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase (unittest.TestCase ):
@slow
def UpperCAmelCase_ ( self ) -> int:
"""simple docstring"""
_snake_case : Tuple = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
_snake_case : Any = AutoTokenizer.from_pretrained('''google/mt5-small''' )
_snake_case : List[str] = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
_snake_case : Dict = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
_snake_case : Any = shift_tokens_right(lowercase__ , model.config.pad_token_id , model.config.decoder_start_token_id )
_snake_case : Any = model(lowercase__ , decoder_input_ids=lowercase__ ).logits
_snake_case : Tuple = optax.softmax_cross_entropy(lowercase__ , onehot(lowercase__ , logits.shape[-1] ) ).mean()
_snake_case : Tuple = -(labels.shape[-1] * loss.item())
_snake_case : Union[str, Any] = -84.9_127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 47
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase : Tuple = logging.get_logger(__name__)
UpperCAmelCase : Optional[Any] = {'vocab_file': 'spiece.model'}
UpperCAmelCase : Optional[Any] = {
'vocab_file': {
'bert_for_seq_generation': (
'https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'
),
}
}
UpperCAmelCase : int = {'bert_for_seq_generation': 5_1_2}
class lowerCamelCase (a__ ):
_lowercase : Tuple = VOCAB_FILES_NAMES
_lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP
_lowercase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : List[int] = []
_lowercase : Any = ["""input_ids""", """attention_mask"""]
def __init__( self , lowercase__ , lowercase__="<s>" , lowercase__="</s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__="<::::>" , lowercase__ = None , **lowercase__ , ) -> None:
"""simple docstring"""
_snake_case : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=lowercase__ , eos_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , sep_token=lowercase__ , sp_model_kwargs=self.sp_model_kwargs , **lowercase__ , )
_snake_case : Dict = vocab_file
_snake_case : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase__ )
@property
def UpperCAmelCase_ ( self ) -> Tuple:
"""simple docstring"""
return self.sp_model.get_piece_size()
def UpperCAmelCase_ ( self ) -> Any:
"""simple docstring"""
_snake_case : str = {self.convert_ids_to_tokens(lowercase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
"""simple docstring"""
_snake_case : Tuple = self.__dict__.copy()
_snake_case : Union[str, Any] = None
return state
def __setstate__( self , lowercase__ ) -> List[str]:
"""simple docstring"""
_snake_case : Tuple = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_snake_case : List[Any] = {}
_snake_case : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase_ ( self , lowercase__ ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowercase__ , out_type=lowercase__ )
def UpperCAmelCase_ ( self , lowercase__ ) -> str:
"""simple docstring"""
return self.sp_model.piece_to_id(lowercase__ )
def UpperCAmelCase_ ( self , lowercase__ ) -> List[Any]:
"""simple docstring"""
_snake_case : Dict = self.sp_model.IdToPiece(lowercase__ )
return token
def UpperCAmelCase_ ( self , lowercase__ ) -> int:
"""simple docstring"""
_snake_case : Any = []
_snake_case : List[Any] = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowercase__ ) + token
_snake_case : str = []
else:
current_sub_tokens.append(lowercase__ )
out_string += self.sp_model.decode(lowercase__ )
return out_string.strip()
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowercase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_snake_case : Optional[int] = os.path.join(
lowercase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase__ , '''wb''' ) as fi:
_snake_case : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(lowercase__ )
return (out_vocab_file,)
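# Illustrative addendum: the underlying SentencePiece round-trip this tokenizer
# builds on (commented out; "spiece.model" is a placeholder path).
# import sentencepiece as spm
# sp = spm.SentencePieceProcessor()
# sp.Load("spiece.model")
# ids = sp.encode("Hello world", out_type=int)
# print(sp.decode(ids))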
| 708
|
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCamelCase (unittest.TestCase ):
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_snake_case : Any = torch.nn.Linear(10 , 10 )
_snake_case : Optional[int] = torch.optim.SGD(model.parameters() , 0.1 )
_snake_case : List[str] = Accelerator()
_snake_case : Optional[Any] = accelerator.prepare(lowercase__ )
try:
pickle.loads(pickle.dumps(lowercase__ ) )
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
| 47
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Any = logging.get_logger(__name__)
UpperCAmelCase : List[str] = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class lowerCamelCase (a__ ):
_lowercase : Union[str, Any] = """speech_to_text"""
_lowercase : str = ["""past_key_values"""]
_lowercase : List[str] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , lowercase__=10_000 , lowercase__=12 , lowercase__=2_048 , lowercase__=4 , lowercase__=6 , lowercase__=2_048 , lowercase__=4 , lowercase__=0.0 , lowercase__=0.0 , lowercase__=True , lowercase__=True , lowercase__="relu" , lowercase__=256 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.0 , lowercase__=0.02 , lowercase__=2 , lowercase__=True , lowercase__=1 , lowercase__=0 , lowercase__=2 , lowercase__=6_000 , lowercase__=1_024 , lowercase__=2 , lowercase__=(5, 5) , lowercase__=1_024 , lowercase__=80 , lowercase__=1 , **lowercase__ , ) -> List[Any]:
"""simple docstring"""
_snake_case : int = vocab_size
_snake_case : str = d_model
_snake_case : Optional[Any] = encoder_ffn_dim
_snake_case : List[str] = encoder_layers
_snake_case : List[Any] = encoder_attention_heads
_snake_case : Union[str, Any] = decoder_ffn_dim
_snake_case : List[str] = decoder_layers
_snake_case : Any = decoder_attention_heads
_snake_case : Any = dropout
_snake_case : List[str] = attention_dropout
_snake_case : Dict = activation_dropout
_snake_case : List[str] = activation_function
_snake_case : Optional[int] = init_std
_snake_case : List[str] = encoder_layerdrop
_snake_case : Any = decoder_layerdrop
_snake_case : Any = use_cache
_snake_case : Optional[Any] = encoder_layers
_snake_case : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True
_snake_case : int = max_source_positions
_snake_case : Optional[int] = max_target_positions
_snake_case : int = num_conv_layers
_snake_case : Optional[int] = list(lowercase__ )
_snake_case : List[str] = conv_channels
_snake_case : Tuple = input_feat_per_channel
_snake_case : int = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '''
F'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
super().__init__(
pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , is_encoder_decoder=lowercase__ , decoder_start_token_id=lowercase__ , **lowercase__ , )
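# Illustrative addendum (assumes the released transformers library is installed):
# hidden_size resolves to d_model (default 256) through the attribute map above.
from transformers import Speech2TextConfig
_cfg = Speech2TextConfig(encoder_layers=2, decoder_layers=2)
print(_cfg.hidden_size)  # 256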
| 709
|
'''simple docstring'''
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]
def create_vector(end_pointa: Point3d, end_pointb: Point3d) -> Vector3d:
    """Vector from end_pointa to end_pointb."""
    x = end_pointb[0] - end_pointa[0]
    y = end_pointb[1] - end_pointa[1]
    z = end_pointb[2] - end_pointa[2]
    return (x, y, z)
def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    """Cross product of two 3D vectors."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)
def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    """True if every component rounds to zero at the given accuracy."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)
def are_collinear(pointa: Point3d, pointb: Point3d, pointc: Point3d, accuracy: int = 10) -> bool:
    """Check collinearity of three 3D points via the cross product."""
    ab = create_vector(pointa, pointb)
    ac = create_vector(pointa, pointc)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
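# Illustrative usage (added for clarity): three points on the x-axis are
# collinear; moving one point off the line breaks collinearity.
print(are_collinear((0, 0, 0), (1, 0, 0), (2, 0, 0)))  # True
print(are_collinear((0, 0, 0), (1, 0, 0), (0, 1, 0)))  # False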
| 47
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase : str = logging.get_logger(__name__)
UpperCAmelCase : List[Any] = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
_snake_case : int = k.replace(lowerCAmelCase_ , lowerCAmelCase_ )
if k.startswith('''encoder''' ):
_snake_case : Optional[int] = k.replace('''.attn''' , '''.self_attn''' )
_snake_case : str = k.replace('''norm1''' , '''self_attn_layer_norm''' )
_snake_case : Any = k.replace('''norm2''' , '''final_layer_norm''' )
elif k.startswith('''decoder''' ):
_snake_case : Optional[int] = k.replace('''norm1''' , '''self_attn_layer_norm''' )
_snake_case : Any = k.replace('''norm2''' , '''encoder_attn_layer_norm''' )
_snake_case : Union[str, Any] = k.replace('''norm3''' , '''final_layer_norm''' )
return k
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : str = [
'''model.encoder.layernorm_embedding.weight''',
'''model.encoder.layernorm_embedding.bias''',
'''model.decoder.layernorm_embedding.weight''',
'''model.decoder.layernorm_embedding.bias''',
]
for k in keys:
_snake_case : Optional[int] = sd.pop(lowerCAmelCase_ )
_snake_case : Tuple = k.replace('''layernorm_embedding''' , '''layer_norm''' )
assert new_k not in sd
_snake_case : Any = v
UpperCAmelCase : Any = ['START']
@torch.no_grad()
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : int = torch.load(lowerCAmelCase_ , map_location='''cpu''' )
_snake_case : Tuple = model['''model''']
_snake_case : Dict = BlenderbotConfig.from_json_file(lowerCAmelCase_ )
_snake_case : Union[str, Any] = BlenderbotForConditionalGeneration(lowerCAmelCase_ )
_snake_case : Optional[int] = m.model.state_dict().keys()
_snake_case : Optional[Any] = []
_snake_case : Dict = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
_snake_case : Optional[int] = rename_state_dict_key(lowerCAmelCase_ )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
_snake_case : str = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(lowerCAmelCase_ )
m.model.load_state_dict(lowerCAmelCase_ , strict=lowerCAmelCase_ )
m.half()
m.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
UpperCAmelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
UpperCAmelCase : str = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 710
|
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
UpperCAmelCase : List[str] = logging.getLogger(__name__)
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
if os.path.exists(lowerCAmelCase_ ):
if os.path.exists(os.path.join(lowerCAmelCase_ , '''config.json''' ) ) and os.path.isfile(
os.path.join(lowerCAmelCase_ , '''config.json''' ) ):
os.remove(os.path.join(lowerCAmelCase_ , '''config.json''' ) )
if os.path.exists(os.path.join(lowerCAmelCase_ , '''pytorch_model.bin''' ) ) and os.path.isfile(
os.path.join(lowerCAmelCase_ , '''pytorch_model.bin''' ) ):
os.remove(os.path.join(lowerCAmelCase_ , '''pytorch_model.bin''' ) )
else:
os.makedirs(lowerCAmelCase_ )
model.save_pretrained(lowerCAmelCase_ )
def _a ( lowerCAmelCase_ , lowerCAmelCase_=False ):
"""simple docstring"""
_snake_case : Optional[Any] = 2
if unlogit:
_snake_case : Any = torch.pow(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case : Union[str, Any] = p * torch.log(lowerCAmelCase_ )
_snake_case : Optional[Any] = 0
return -plogp.sum(dim=-1 )
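# Illustrative check (added for clarity): for a uniform distribution over four
# outcomes the entropy above equals log(4) ~= 1.3863.
# p = torch.full((1, 4), 0.25)
# print((-(p * torch.log(p)).sum(dim=-1)).item())  # ~1.3863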
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
logger.info('''lv, h >\t''' + '''\t'''.join(f'''{x + 1}''' for x in range(len(lowerCAmelCase_ ) ) ) )
for row in range(len(lowerCAmelCase_ ) ):
if tensor.dtype != torch.long:
logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) )
else:
logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:d}''' for x in tensor[row].cpu().data ) )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=None , lowerCAmelCase_=False ):
"""simple docstring"""
_snake_case , _snake_case : Optional[int] = model.config.num_hidden_layers, model.config.num_attention_heads
_snake_case : Tuple = torch.zeros(lowerCAmelCase_ , lowerCAmelCase_ ).to(args.device )
_snake_case : Union[str, Any] = torch.zeros(lowerCAmelCase_ , lowerCAmelCase_ ).to(args.device )
if head_mask is None:
_snake_case : int = torch.ones(lowerCAmelCase_ , lowerCAmelCase_ ).to(args.device )
head_mask.requires_grad_(requires_grad=lowerCAmelCase_ )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
_snake_case : Dict = None
_snake_case : Dict = 0.0
_snake_case : Optional[int] = 0.0
for step, inputs in enumerate(tqdm(lowerCAmelCase_ , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ):
_snake_case : List[Any] = tuple(t.to(args.device ) for t in inputs )
((_snake_case) , ) : Optional[Any] = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
_snake_case : Any = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , head_mask=lowerCAmelCase_ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
_snake_case , _snake_case , _snake_case : List[Any] = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(lowerCAmelCase_ ):
_snake_case : Union[str, Any] = entropy(attn.detach() , lowerCAmelCase_ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(lowerCAmelCase_ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
_snake_case : Any = 2
_snake_case : List[str] = torch.pow(torch.pow(lowerCAmelCase_ , lowerCAmelCase_ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
if not args.dont_normalize_global_importance:
_snake_case : Optional[int] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('''Attention entropies''' )
print_ad_tensor(lowerCAmelCase_ )
if compute_importance:
logger.info('''Head importance scores''' )
print_ad_tensor(lowerCAmelCase_ )
logger.info('''Head ranked by importance scores''' )
_snake_case : str = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
_snake_case : List[Any] = torch.arange(
head_importance.numel() , device=args.device )
_snake_case : List[Any] = head_ranks.view_as(lowerCAmelCase_ )
print_ad_tensor(lowerCAmelCase_ )
return attn_entropy, head_importance, total_loss
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case , _snake_case , _snake_case : str = compute_heads_importance(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ )
_snake_case : Optional[Any] = 1 / loss # instead of downsteam score use the LM loss
logger.info('''Pruning: original score: %f, threshold: %f''' , lowerCAmelCase_ , original_score * args.masking_threshold )
_snake_case : int = torch.ones_like(lowerCAmelCase_ )
_snake_case : Optional[Any] = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
_snake_case : int = original_score
while current_score >= original_score * args.masking_threshold:
_snake_case : int = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
_snake_case : Dict = float('''Inf''' )
_snake_case : Optional[Any] = head_importance.view(-1 ).sort()[1]
if len(lowerCAmelCase_ ) <= num_to_mask:
print('''BREAK BY num_to_mask''' )
break
# mask heads
_snake_case : Union[str, Any] = current_heads_to_mask[:num_to_mask]
logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) )
_snake_case : Tuple = new_head_mask.view(-1 )
_snake_case : List[str] = 0.0
_snake_case : str = new_head_mask.view_as(lowerCAmelCase_ )
_snake_case : Dict = new_head_mask.clone().detach()
print_ad_tensor(lowerCAmelCase_ )
# Compute metric and head importance again
_snake_case , _snake_case , _snake_case : Any = compute_heads_importance(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ , head_mask=lowerCAmelCase_ )
_snake_case : int = 1 / loss
logger.info(
'''Masking: current score: %f, remaining heads %d (%.1f percents)''' , lowerCAmelCase_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info('''Final head mask''' )
print_ad_tensor(lowerCAmelCase_ )
np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() )
return head_mask
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Optional[Any] = datetime.now()
_snake_case , _snake_case , _snake_case : Union[str, Any] = compute_heads_importance(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ , compute_importance=lowerCAmelCase_ , head_mask=lowerCAmelCase_ )
_snake_case : Tuple = 1 / loss
_snake_case : Dict = datetime.now() - before_time
_snake_case : List[Any] = sum(p.numel() for p in model.parameters() )
_snake_case : int = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(lowerCAmelCase_ ) )
}
for k, v in heads_to_prune.items():
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case : Union[str, Any] = [
v,
]
assert sum(len(lowerCAmelCase_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(lowerCAmelCase_ )
_snake_case : List[str] = sum(p.numel() for p in model.parameters() )
_snake_case : int = datetime.now()
_snake_case , _snake_case , _snake_case : Optional[Any] = compute_heads_importance(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ , compute_importance=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , actually_pruned=lowerCAmelCase_ , )
_snake_case : Optional[int] = 1 / loss
_snake_case : Dict = datetime.now() - before_time
logger.info(
'''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , lowerCAmelCase_ , lowerCAmelCase_ , pruned_num_params / original_num_params * 100 , )
logger.info('''Pruning: score with masking: %f score with pruning: %f''' , lowerCAmelCase_ , lowerCAmelCase_ )
logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100 )
save_model(lowerCAmelCase_ , args.output_dir )
def _a ( ):
"""simple docstring"""
_snake_case : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--data_dir''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--output_dir''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''The output directory where the model predictions and checkpoints will be written.''' , )
# Other parameters
parser.add_argument(
'''--config_name''' , default='''''' , type=lowerCAmelCase_ , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--tokenizer_name''' , default='''''' , type=lowerCAmelCase_ , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--cache_dir''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
parser.add_argument(
'''--data_subset''' , type=lowerCAmelCase_ , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' )
parser.add_argument(
'''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
parser.add_argument(
'''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' )
parser.add_argument(
'''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
parser.add_argument(
'''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' )
parser.add_argument(
'''--masking_threshold''' , default=0.9 , type=lowerCAmelCase_ , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , )
parser.add_argument(
'''--masking_amount''' , default=0.1 , type=lowerCAmelCase_ , help='''Amount to heads to masking at each masking step.''' )
parser.add_argument('''--metric_name''' , default='''acc''' , type=lowerCAmelCase_ , help='''Metric to use for head masking.''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=lowerCAmelCase_ , help=(
'''The maximum total input sequence length after WordPiece tokenization. \n'''
'''Sequences longer than this will be truncated, sequences shorter padded.'''
) , )
parser.add_argument('''--batch_size''' , default=1 , type=lowerCAmelCase_ , help='''Batch size.''' )
parser.add_argument('''--seed''' , type=lowerCAmelCase_ , default=42 )
parser.add_argument('''--local_rank''' , type=lowerCAmelCase_ , default=-1 , help='''local_rank for distributed training on gpus''' )
parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' )
parser.add_argument('''--server_ip''' , type=lowerCAmelCase_ , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=lowerCAmelCase_ , default='''''' , help='''Can be used for distant debugging.''' )
_snake_case : Optional[Any] = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCAmelCase_ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
_snake_case : str = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
_snake_case : Optional[Any] = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
_snake_case : List[str] = torch.device('''cuda''' , args.local_rank )
_snake_case : int = 1
torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
_snake_case : Optional[Any] = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
_snake_case : Optional[int] = nn.parallel.DistributedDataParallel(
lowerCAmelCase_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=lowerCAmelCase_ )
elif args.n_gpu > 1:
_snake_case : List[Any] = nn.DataParallel(lowerCAmelCase_ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=lowerCAmelCase_ )
torch.save(lowerCAmelCase_ , os.path.join(args.output_dir , '''run_args.bin''' ) )
logger.info('''Training/evaluation parameters %s''' , lowerCAmelCase_ )
# Prepare dataset
_snake_case : Dict = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
_snake_case : int = (torch.from_numpy(lowerCAmelCase_ ),)
_snake_case : Tuple = TensorDataset(*lowerCAmelCase_ )
_snake_case : List[str] = RandomSampler(lowerCAmelCase_ )
_snake_case : Dict = DataLoader(lowerCAmelCase_ , sampler=lowerCAmelCase_ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
_snake_case : Optional[int] = mask_heads(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
prune_heads(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if __name__ == "__main__":
main()
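# Illustrative invocation (added for clarity; the script name, data file and
# flag values are assumptions for this sketch):
# python run_gpt2_head_pruning.py --model_name_or_path gpt2 --data_dir ./tokens.txt \
#     --output_dir ./pruned --try_masking --masking_threshold 0.9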
| 47
| 0
|
'''simple docstring'''
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
UpperCAmelCase : Optional[Any] = TypeVar('T')
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
return (position - 1) // 2
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
return (2 * position) + 1
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
return (2 * position) + 2
class lowerCamelCase (Generic[T] ):
def __init__( self ) -> None:
"""simple docstring"""
_snake_case : list[tuple[T, int]] = []
_snake_case : dict[T, int] = {}
_snake_case : int = 0
def __len__( self ) -> int:
"""simple docstring"""
return self.elements
def __repr__( self ) -> str:
"""simple docstring"""
return str(self.heap )
def UpperCAmelCase_ ( self ) -> bool:
"""simple docstring"""
return self.elements == 0
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> None:
"""simple docstring"""
self.heap.append((elem, weight) )
_snake_case : Optional[Any] = self.elements
self.elements += 1
self._bubble_up(lowercase__ )
def UpperCAmelCase_ ( self ) -> T:
"""simple docstring"""
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
_snake_case : List[str] = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
_snake_case : Tuple = self.heap[0]
self._bubble_down(lowercase__ )
return elem
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> None:
"""simple docstring"""
_snake_case : List[str] = self.position_map[elem]
_snake_case : Any = (elem, weight)
if position > 0:
_snake_case : Optional[int] = get_parent_position(lowercase__ )
_snake_case : Tuple = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(lowercase__ )
else:
self._bubble_down(lowercase__ )
else:
self._bubble_down(lowercase__ )
def UpperCAmelCase_ ( self , lowercase__ ) -> None:
"""simple docstring"""
_snake_case : Tuple = self.position_map[elem]
if curr_pos == 0:
return None
_snake_case : Optional[int] = get_parent_position(lowercase__ )
_snake_case : Optional[Any] = self.heap[curr_pos]
_snake_case : int = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(lowercase__ , lowercase__ )
return self._bubble_up(lowercase__ )
return None
def UpperCAmelCase_ ( self , lowercase__ ) -> None:
"""simple docstring"""
_snake_case : Any = self.position_map[elem]
_snake_case : List[Any] = self.heap[curr_pos]
_snake_case : Dict = get_child_left_position(lowercase__ )
_snake_case : Optional[int] = get_child_right_position(lowercase__ )
if child_left_position < self.elements and child_right_position < self.elements:
_snake_case : Any = self.heap[child_left_position]
_snake_case : str = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(lowercase__ , lowercase__ )
return self._bubble_down(lowercase__ )
if child_left_position < self.elements:
_snake_case : List[str] = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(lowercase__ , lowercase__ )
return self._bubble_down(lowercase__ )
else:
return None
if child_right_position < self.elements:
_snake_case : Optional[int] = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(lowercase__ , lowercase__ )
return self._bubble_down(lowercase__ )
return None
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> None:
"""simple docstring"""
_snake_case : List[Any] = self.heap[nodea_pos][0]
_snake_case : str = self.heap[nodea_pos][0]
_snake_case : Union[str, Any] = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
_snake_case : Dict = nodea_pos
_snake_case : Any = nodea_pos
class lowerCamelCase (Generic[T] ):
def __init__( self ) -> None:
"""simple docstring"""
_snake_case : dict[T, dict[T, int]] = {}
_snake_case : int = 0
def __repr__( self ) -> str:
"""simple docstring"""
return str(self.connections )
def __len__( self ) -> int:
"""simple docstring"""
return self.nodes
def UpperCAmelCase_ ( self , lowercase__ ) -> None:
"""simple docstring"""
if node not in self.connections:
_snake_case : Optional[int] = {}
self.nodes += 1
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ ) -> None:
"""simple docstring"""
self.add_node(lowercase__ )
self.add_node(lowercase__ )
_snake_case : Union[str, Any] = weight
_snake_case : Any = weight
def _a ( lowerCAmelCase_ , ):
"""simple docstring"""
_snake_case : dict[T, int] = {node: maxsize for node in graph.connections}
_snake_case : dict[T, T | None] = {node: None for node in graph.connections}
_snake_case : MinPriorityQueue[T] = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(lowerCAmelCase_ , lowerCAmelCase_ )
if priority_queue.is_empty():
return dist, parent
# initialization
_snake_case : str = priority_queue.extract_min()
_snake_case : Optional[int] = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
_snake_case : Dict = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowerCAmelCase_ , dist[neighbour] )
_snake_case : int = node
# running prim's algorithm
while not priority_queue.is_empty():
_snake_case : Any = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
_snake_case : int = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowerCAmelCase_ , dist[neighbour] )
_snake_case : List[str] = node
return dist, parent
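# Illustrative addendum: a compact, self-contained Prim's algorithm using
# heapq, independent of the priority-queue class above (added for clarity).
import heapq
def prim_mst(graph: dict, start) -> tuple[dict, dict]:
    """graph: {node: {neighbour: edge_weight}} -> (key weights, parents)."""
    dist, parent, seen = {start: 0}, {start: None}, set()
    heap = [(0, start)]
    while heap:
        _, node = heapq.heappop(heap)
        if node in seen:
            continue
        seen.add(node)
        for neighbour, weight in graph[node].items():
            if neighbour not in seen and weight < dist.get(neighbour, float("inf")):
                dist[neighbour], parent[neighbour] = weight, node
                heapq.heappush(heap, (weight, neighbour))
    return dist, parent
print(prim_mst({"a": {"b": 1, "c": 4}, "b": {"a": 1, "c": 2}, "c": {"a": 4, "b": 2}}, "a"))
# ({'a': 0, 'b': 1, 'c': 2}, {'a': None, 'b': 'a', 'c': 'b'})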
| 711
|
'''simple docstring'''
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number (fibonacci(12) == 144)."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]
def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with n digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index
def solution(n: int = 1_000) -> int:
    """Project Euler 25: index of the first Fibonacci term with n digits."""
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
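    # Illustrative check (added for clarity): the first Fibonacci number with
    # three digits is F(12) = 144.
    print(fibonacci_digits_index(3))  # 12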
| 47
| 0
|
'''simple docstring'''
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
UpperCAmelCase : List[str] = float('nan')
class lowerCamelCase :
def __init__( self , lowercase__ ) -> List[str]:
"""simple docstring"""
_snake_case : Union[str, Any] = sys.stdout
_snake_case : List[Any] = open(lowercase__ , '''a''' )
def __getattr__( self , lowercase__ ) -> Optional[int]:
"""simple docstring"""
return getattr(self.stdout , lowercase__ )
def UpperCAmelCase_ ( self , lowercase__ ) -> Dict:
"""simple docstring"""
self.stdout.write(lowercase__ )
# strip tqdm codes
self.file.write(re.sub(r'''^.*\r''' , '''''' , lowercase__ , 0 , re.M ) )
def _a ( lowerCAmelCase_=80 , lowerCAmelCase_=False ):
"""simple docstring"""
_snake_case : Any = []
# deal with critical env vars
_snake_case : List[str] = ['''CUDA_VISIBLE_DEVICES''']
for key in env_keys:
_snake_case : List[Any] = os.environ.get(lowerCAmelCase_ , lowerCAmelCase_ )
if val is not None:
cmd.append(f'''{key}={val}''' )
# python executable (not always needed if the script is executable)
_snake_case : Any = sys.executable if full_python_path else sys.executable.split('''/''' )[-1]
cmd.append(lowerCAmelCase_ )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
_snake_case : Union[str, Any] = []
_snake_case : str = ''''''
while len(lowerCAmelCase_ ) > 0:
current_line += f'''{cmd.pop(0 )} '''
if len(lowerCAmelCase_ ) == 0 or len(lowerCAmelCase_ ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(lowerCAmelCase_ )
_snake_case : Optional[Any] = ''''''
return "\\\n".join(lowerCAmelCase_ )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : str = re.sub(R'''[\\\n]+''' , ''' ''' , args.base_cmd )
# remove --output_dir if any and set our own
    _snake_case : Optional[Any] = re.sub(r'''--output_dir\s+[^\s]+''' , '''''' , args.base_cmd )
args.base_cmd += f''' --output_dir {output_dir}'''
# ensure we have --overwrite_output_dir
    _snake_case : List[str] = re.sub(r'''--overwrite_output_dir\s+''' , '''''' , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6_666, 222.22_222_222] )} , )
_snake_case : int = subprocess.run(lowerCAmelCase_ , capture_output=lowerCAmelCase_ , text=lowerCAmelCase_ )
if verbose:
print('''STDOUT''' , result.stdout )
print('''STDERR''' , result.stderr )
# save the streams
_snake_case : Optional[Any] = variation.replace(''' ''' , '''-''' )
with open(Path(lowerCAmelCase_ ) / f'''log.{prefix}.stdout.txt''' , '''w''' ) as f:
f.write(result.stdout )
with open(Path(lowerCAmelCase_ ) / f'''log.{prefix}.stderr.txt''' , '''w''' ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print('''failed''' )
return {target_metric_key: nan}
with io.open(f'''{output_dir}/all_results.json''' , '''r''' , encoding='''utf-8''' ) as f:
_snake_case : Union[str, Any] = json.load(lowerCAmelCase_ )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ):
"""simple docstring"""
_snake_case : Optional[Any] = []
_snake_case : Optional[Any] = []
_snake_case : Dict = f'''{id}: {variation:<{longest_variation_len}}'''
_snake_case : int = f'''{preamble}: '''
_snake_case : Union[str, Any] = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(lowerCAmelCase_ ) , desc=lowerCAmelCase_ , leave=lowerCAmelCase_ ):
_snake_case : Optional[Any] = process_run_single(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case : Union[str, Any] = single_run_metrics[target_metric_key]
if not math.isnan(lowerCAmelCase_ ):
metrics.append(lowerCAmelCase_ )
results.append(lowerCAmelCase_ )
outcome += "✓"
else:
outcome += "✘"
_snake_case : Any = f'''\33[2K\r{outcome}'''
if len(lowerCAmelCase_ ) > 0:
_snake_case : List[str] = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
_snake_case : str = round(mean_metrics[target_metric_key] , 2 )
_snake_case : Any = f'''{outcome} {mean_target}'''
if len(lowerCAmelCase_ ) > 1:
results_str += f''' {tuple(round(lowerCAmelCase_ , 2 ) for x in results )}'''
print(lowerCAmelCase_ )
_snake_case : List[Any] = variation
return mean_metrics
else:
print(lowerCAmelCase_ )
return {variation_key: variation, target_metric_key: nan}
def _a ( ):
"""simple docstring"""
_snake_case : Any = torch.cuda.get_device_properties(torch.device('''cuda''' ) )
return f'''
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Union[str, Any] = pd.DataFrame(lowerCAmelCase_ )
_snake_case : Tuple = '''variation'''
_snake_case : Any = '''diff_%'''
_snake_case : Any = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
_snake_case : Optional[Any] = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(lowerCAmelCase_ ):
# as a fallback, use the minimal value as the sentinel
_snake_case : Any = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(lowerCAmelCase_ ):
_snake_case : str = df.apply(
lambda lowerCAmelCase_ : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 , axis='''columns''' , )
# re-order columns
_snake_case : Optional[int] = [variation_key, target_metric_key, diff_key, *report_metric_keys]
_snake_case : List[str] = df.reindex(lowerCAmelCase_ , axis='''columns''' ) # reorder cols
# capitalize
_snake_case : List[str] = df.rename(str.capitalize , axis='''columns''' )
# make the cols as narrow as possible
_snake_case : str = df.rename(lambda lowerCAmelCase_ : c.replace('''_''' , '''<br>''' ) , axis='''columns''' )
_snake_case : Any = df.rename(lambda lowerCAmelCase_ : c.replace('''_''' , '''\n''' ) , axis='''columns''' )
_snake_case : List[str] = ['''''', '''Copy between the cut-here-lines and paste as is to github or a forum''']
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=lowerCAmelCase_ , floatfmt='''.2f''' )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=lowerCAmelCase_ , floatfmt='''.2f''' )]
print('''\n\n'''.join(lowerCAmelCase_ ) )
def _a ( ):
"""simple docstring"""
_snake_case : Tuple = argparse.ArgumentParser()
parser.add_argument(
'''--base-cmd''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''Base cmd''' , )
parser.add_argument(
'''--variations''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , nargs='''+''' , required=lowerCAmelCase_ , help='''Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'''' , )
parser.add_argument(
'''--base-variation''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , help='''Baseline variation to compare to. if None the minimal target value will be used to compare against''' , )
parser.add_argument(
'''--target-metric-key''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''Target metric key in output_dir/all_results.json, e.g., train_samples_per_second''' , )
parser.add_argument(
'''--report-metric-keys''' , default='''''' , type=lowerCAmelCase_ , help='''Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples''' , )
parser.add_argument(
'''--repeat-times''' , default=1 , type=lowerCAmelCase_ , help='''How many times to re-run each variation - an average will be reported''' , )
parser.add_argument(
'''--output_dir''' , default='''output_benchmark''' , type=lowerCAmelCase_ , help='''The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked''' , )
parser.add_argument(
'''--verbose''' , default=lowerCAmelCase_ , action='''store_true''' , help='''Whether to show the outputs of each run or just the benchmark progress''' , )
_snake_case : Any = parser.parse_args()
_snake_case : Dict = args.output_dir
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
_snake_case : Any = get_base_command(lowerCAmelCase_ , lowerCAmelCase_ )
# split each dimension into its --foo variations
_snake_case : Optional[int] = [list(map(str.strip , re.split(R'''\|''' , lowerCAmelCase_ ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
_snake_case : int = list(map(str.strip , map(''' '''.join , itertools.product(*lowerCAmelCase_ ) ) ) )
_snake_case : Dict = max(len(lowerCAmelCase_ ) for x in variations )
# split wanted keys
_snake_case : str = args.report_metric_keys.split()
# capture prints into a log file for convenience
_snake_case : Any = f'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'''
print(f'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' )
print(f'''and this script\'s output is also piped into {report_fn}''' )
_snake_case : List[Any] = Tee(lowerCAmelCase_ )
print(f'''\n*** Running {len(lowerCAmelCase_ )} benchmarks:''' )
print(f'''Base command: {" ".join(lowerCAmelCase_ )}''' )
_snake_case : List[Any] = '''variation'''
_snake_case : Tuple = []
for id, variation in enumerate(tqdm(lowerCAmelCase_ , desc='''Total completion: ''' , leave=lowerCAmelCase_ ) ):
_snake_case : Optional[int] = base_cmd + variation.split()
results.append(
process_run(
id + 1 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , args.target_metric_key , lowerCAmelCase_ , args.repeat_times , lowerCAmelCase_ , args.verbose , ) )
process_results(lowerCAmelCase_ , args.target_metric_key , lowerCAmelCase_ , args.base_variation , lowerCAmelCase_ )
if __name__ == "__main__":
main()
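# Illustrative invocation (added for clarity; the wrapped training command is a
# placeholder):
# python trainer-benchmark.py --base-cmd "python run_translation.py --output_dir out" \
#     --variations '|--fp16|--bf16' '|--tf32' --target-metric-key train_samples_per_second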
| 712
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
UpperCAmelCase : Any = TypeVar('T')
UpperCAmelCase : str = TypeVar('U')
class lowerCamelCase (Generic[T, U] ):
def __init__( self , lowercase__ , lowercase__ ) -> List[Any]:
"""simple docstring"""
_snake_case : str = key
_snake_case : Optional[int] = val
_snake_case : DoubleLinkedListNode[T, U] | None = None
_snake_case : DoubleLinkedListNode[T, U] | None = None
def __repr__( self ) -> str:
"""simple docstring"""
return (
F'''Node: key: {self.key}, val: {self.val}, '''
F'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
)
class lowerCamelCase (Generic[T, U] ):
def __init__( self ) -> None:
"""simple docstring"""
_snake_case : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowercase__ , lowercase__ )
_snake_case : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowercase__ , lowercase__ )
_snake_case , _snake_case : Union[str, Any] = self.rear, self.head
def __repr__( self ) -> str:
"""simple docstring"""
_snake_case : List[Any] = ['''DoubleLinkedList''']
_snake_case : str = self.head
while node.next is not None:
rep.append(str(lowercase__ ) )
_snake_case : List[str] = node.next
rep.append(str(self.rear ) )
return ",\n ".join(lowercase__ )
def UpperCAmelCase_ ( self , lowercase__ ) -> None:
"""simple docstring"""
_snake_case : Tuple = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
_snake_case : Union[str, Any] = node
_snake_case : Optional[Any] = previous
_snake_case : int = node
_snake_case : Union[str, Any] = self.rear
def UpperCAmelCase_ ( self , lowercase__ ) -> DoubleLinkedListNode[T, U] | None:
"""simple docstring"""
if node.prev is None or node.next is None:
return None
_snake_case : Optional[int] = node.next
_snake_case : Any = node.prev
_snake_case : List[str] = None
_snake_case : Optional[int] = None
return node
class lowerCamelCase (Generic[T, U] ):
_lowercase : dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__( self , lowercase__ ) -> Union[str, Any]:
"""simple docstring"""
_snake_case : DoubleLinkedList[T, U] = DoubleLinkedList()
_snake_case : Union[str, Any] = capacity
_snake_case : int = 0
_snake_case : Dict = 0
_snake_case : Union[str, Any] = 0
_snake_case : dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self ) -> str:
"""simple docstring"""
return (
F'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
F'''capacity={self.capacity}, current size={self.num_keys})'''
)
def __contains__( self , lowercase__ ) -> bool:
"""simple docstring"""
return key in self.cache
def UpperCAmelCase_ ( self , lowercase__ ) -> U | None:
"""simple docstring"""
if key in self.cache:
self.hits += 1
_snake_case : DoubleLinkedListNode[T, U] = self.cache[key]
_snake_case : Tuple = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(lowercase__ )
return node.val
self.miss += 1
return None
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> None:
"""simple docstring"""
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
_snake_case : Dict = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(lowercase__ ) is not None
) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
_snake_case : Optional[int] = DoubleLinkedListNode(lowercase__ , lowercase__ )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
_snake_case : Optional[Any] = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
_snake_case : Optional[Any] = value
self.list.add(lowercase__ )
@classmethod
def UpperCAmelCase_ ( cls , lowercase__ = 128 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
"""simple docstring"""
def cache_decorator_inner(lowercase__ ) -> Callable[..., U]:
def cache_decorator_wrapper(*lowercase__ ) -> U:
if func not in cls.decorator_function_to_instance_map:
_snake_case : Optional[Any] = LRUCache(lowercase__ )
_snake_case : Union[str, Any] = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
_snake_case : Tuple = func(*lowercase__ )
cls.decorator_function_to_instance_map[func].put(args[0] , lowercase__ )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(lowercase__ , '''cache_info''' , lowercase__ ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
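# Hedged usage sketch for the decorator classmethod above (the `fib` name is
# illustrative, the classmethod is reached via its obfuscated name, and the
# intended cache wiring inside `cache_decorator_wrapper` is assumed):
#
#   @LRUCache.UpperCAmelCase_(100)
#   def fib(num):
#       return 1 if num in (1, 2) else fib(num - 1) + fib(num - 2)
#
#   fib(25)           # recursive calls are served from the shared LRUCache
#   fib.cache_info()  # -> the backing LRUCache with its hit/miss counters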
| 47
| 0
|
'''simple docstring'''
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
    from .pipeline_spectrogram_diffusion import (
        SpectrogramDiffusionPipeline,
        TaFilmDecoder,
    )
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 713
|
'''simple docstring'''
import os
import numpy
import onnx
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : List[Any] = a.name
_snake_case : List[Any] = b.name
_snake_case : Tuple = ''''''
_snake_case : Tuple = ''''''
_snake_case : Optional[Any] = a == b
_snake_case : List[Any] = name_a
_snake_case : str = name_b
return res
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(lowerCAmelCase_ , lowerCAmelCase_ )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , lowerCAmelCase_ , lowerCAmelCase_ )
_graph_replace_input_with(node_proto.attribute[1].g , lowerCAmelCase_ , lowerCAmelCase_ )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , lowerCAmelCase_ , lowerCAmelCase_ )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
for n in graph_proto.node:
_node_replace_input_with(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Optional[Any] = list(model.graph.initializer )
_snake_case : List[str] = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
_snake_case : List[Any] = inits[i].name
_snake_case : List[str] = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , lowerCAmelCase_ , lowerCAmelCase_ )
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Tuple = os.path.dirname(lowerCAmelCase_ )
_snake_case : str = os.path.basename(lowerCAmelCase_ )
_snake_case : Tuple = onnx.load(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) )
_snake_case : Union[str, Any] = list(model.graph.initializer )
_snake_case : Union[str, Any] = set()
_snake_case : Any = {}
_snake_case : str = []
_snake_case : Union[str, Any] = 0
for i in range(len(lowerCAmelCase_ ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(lowerCAmelCase_ ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(lowerCAmelCase_ )
dup_set.add(lowerCAmelCase_ )
_snake_case : List[Any] = inits[j].data_type
_snake_case : Dict = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('''unexpected data type: ''' , lowerCAmelCase_ )
total_reduced_size += mem_size
_snake_case : Union[str, Any] = inits[i].name
_snake_case : Any = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(lowerCAmelCase_ )
else:
_snake_case : Union[str, Any] = [name_j]
ind_to_replace.append((j, i) )
print('''total reduced size: ''' , total_reduced_size / 1_024 / 1_024 / 1_024 , '''GB''' )
_snake_case : List[str] = sorted(lowerCAmelCase_ )
_remove_dup_initializers_from_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case : List[str] = '''optimized_''' + model_file_name
_snake_case : List[Any] = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
onnx.save(lowerCAmelCase_ , lowerCAmelCase_ )
return new_model
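# Hedged usage sketch (the path is illustrative): calling the final function
# above on "/path/to/model.onnx" loads the model, merges duplicated
# initializers, and writes "optimized_model.onnx" into the same directory.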
| 47
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
UpperCAmelCase : List[Any] = {'tokenization_herbert': ['HerbertTokenizer']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[Any] = ['HerbertTokenizerFast']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
UpperCAmelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
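# Hedged usage sketch (the checkpoint name is illustrative): the lazy module
# above defers the heavy imports until first attribute access, e.g.
#
#   from transformers import HerbertTokenizer
#   tokenizer = HerbertTokenizer.from_pretrained("allegro/herbert-base-cased")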
| 714
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase : int = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Union[str, Any] = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 47
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase : int = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[Any] = ['ConditionalDetrFeatureExtractor']
UpperCAmelCase : Dict = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Tuple = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
UpperCAmelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 715
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase : Dict = logging.get_logger(__name__)
class lowerCamelCase (BaseImageProcessor ):
    model_input_names = ["""pixel_values"""]
    def __init__( self , do_resize = True , size_divisor = 32 , resample=PILImageResampling.BILINEAR , do_rescale = True , **kwargs , ) -> None:
        """simple docstring"""
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs )
    def resize( self , image , size_divisor , resample , data_format = None , **kwargs ) -> np.ndarray:
        """simple docstring"""
        height, width = get_image_size(image )
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image , (new_h, new_w) , resample=resample , data_format=data_format , **kwargs )
        return image
    def rescale( self , image , scale , data_format = None , **kwargs ) -> np.ndarray:
        """simple docstring"""
        return rescale(image=image , scale=scale , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size_divisor = None , resample=None , do_rescale = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError('''size_divisor is required for resizing''' )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError('''Invalid image(s)''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img ) for img in images]
        if do_resize:
            images = [self.resize(image , size_divisor=size_divisor , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image , scale=1 / 255 ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
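# Hedged usage sketch (the image size is illustrative; the class keeps its
# obfuscated name from this dump):
#
#   import PIL.Image
#   processor = lowerCamelCase(size_divisor=32)
#   batch = processor.preprocess(PIL.Image.new("RGB", (100, 70)), return_tensors="np")
#   batch["pixel_values"].shape  # (1, 3, 64, 96): 70 -> 64 and 100 -> 96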
| 47
| 0
|
'''simple docstring'''
def method_a ( boundary , steps ):
    """simple docstring"""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a , b , h )
    y = 0.0
    y += (h / 2.0) * f(a )
    for i in x_i:
        # print(i)
        y += h * f(i )
    y += (h / 2.0) * f(b )
    return y
def make_points ( a , b , h ):
    """simple docstring"""
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def f ( x ):  # enter your function here
    """simple docstring"""
    y = (x - 0) * (x - 0)
    return y
def main ():
    """simple docstring"""
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary , steps )
    print(f'''y = {y}''' )
if __name__ == "__main__":
main()
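# Hedged check (illustrative numbers): for f(x) = x * x on [0.0, 1.0] with 10
# steps the exact integral is 1/3, so main() should print roughly y = 0.335.
# Note that make_points stops strictly before b - h, so whether the last
# interior point is yielded can hinge on floating-point drift in x.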
| 716
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class lowerCamelCase :
_lowercase : Any = LEDConfig
_lowercase : Any = {}
_lowercase : Optional[Any] = """gelu"""
def __init__( self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=False , lowercase__=99 , lowercase__=32 , lowercase__=2 , lowercase__=4 , lowercase__=37 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=20 , lowercase__=2 , lowercase__=1 , lowercase__=0 , lowercase__=4 , ) -> Any:
"""simple docstring"""
_snake_case : Dict = parent
_snake_case : Any = batch_size
_snake_case : List[str] = seq_length
_snake_case : Union[str, Any] = is_training
_snake_case : Tuple = use_labels
_snake_case : int = vocab_size
_snake_case : str = hidden_size
_snake_case : Optional[Any] = num_hidden_layers
_snake_case : List[Any] = num_attention_heads
_snake_case : Optional[int] = intermediate_size
_snake_case : List[Any] = hidden_dropout_prob
_snake_case : List[str] = attention_probs_dropout_prob
_snake_case : Optional[int] = max_position_embeddings
_snake_case : Any = eos_token_id
_snake_case : List[Any] = pad_token_id
_snake_case : Optional[int] = bos_token_id
_snake_case : Any = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_snake_case : Any = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_snake_case : Tuple = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def UpperCAmelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_snake_case : Tuple = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_snake_case : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 )
_snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case : List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_snake_case : Dict = prepare_led_inputs_dict(lowercase__ , lowercase__ , lowercase__ )
_snake_case : Dict = tf.concat(
[tf.zeros_like(lowercase__ )[:, :-1], tf.ones_like(lowercase__ )[:, -1:]] , axis=-1 , )
_snake_case : Dict = global_attention_mask
return config, inputs_dict
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> int:
"""simple docstring"""
_snake_case : int = TFLEDModel(config=lowercase__ ).get_decoder()
_snake_case : Union[str, Any] = inputs_dict['''input_ids''']
_snake_case : List[str] = input_ids[:1, :]
_snake_case : Tuple = inputs_dict['''attention_mask'''][:1, :]
_snake_case : Dict = 1
# first forward pass
_snake_case : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__ )
_snake_case , _snake_case : Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_snake_case : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
_snake_case : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_snake_case : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 )
_snake_case : List[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_snake_case : List[Any] = model(lowercase__ , attention_mask=lowercase__ )[0]
_snake_case : Tuple = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_snake_case : Tuple = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_snake_case : int = output_from_no_past[:, -3:, random_slice_idx]
_snake_case : Optional[Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase__ , lowercase__ , rtol=1E-3 )
def prepare_led_inputs_dict ( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , ):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
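# Hedged shape sketch (illustrative, using the tester defaults above of
# batch_size=13 and seq_length=7): prepare_led_inputs_dict fills in
#   attention_mask         -> (13, 7) integer mask of non-pad positions
#   decoder_attention_mask -> (13, 7) with the first position forced to 1
#   head_mask              -> (encoder_layers, encoder_attention_heads) ones
#   decoder_head_mask      -> (decoder_layers, decoder_attention_heads) ones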
@require_tf
class lowerCamelCase (a__ , a__ , unittest.TestCase ):
_lowercase : Optional[int] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_lowercase : int = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
_lowercase : Dict = (
{
"""conversational""": TFLEDForConditionalGeneration,
"""feature-extraction""": TFLEDModel,
"""summarization""": TFLEDForConditionalGeneration,
"""text2text-generation""": TFLEDForConditionalGeneration,
"""translation""": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_lowercase : int = True
_lowercase : List[Any] = False
_lowercase : str = False
_lowercase : Union[str, Any] = False
def UpperCAmelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_snake_case : str = TFLEDModelTester(self )
_snake_case : Union[str, Any] = ConfigTester(self , config_class=lowercase__ )
def UpperCAmelCase_ ( self ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ) -> List[str]:
"""simple docstring"""
_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase__ )
def UpperCAmelCase_ ( self ) -> int:
"""simple docstring"""
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Any = tf.zeros_like(inputs_dict['''attention_mask'''] )
_snake_case : Optional[Any] = 2
_snake_case : Any = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
_snake_case : Dict = True
_snake_case : str = self.model_tester.seq_length
_snake_case : Dict = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowercase__ ):
_snake_case : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(lowercase__ ):
_snake_case : int = [t.numpy() for t in outputs.encoder_attentions]
_snake_case : Tuple = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_snake_case : Union[str, Any] = True
_snake_case : Dict = False
_snake_case : Union[str, Any] = False
_snake_case : List[Any] = model_class(lowercase__ )
_snake_case : Optional[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
_snake_case : List[Any] = len(lowercase__ )
self.assertEqual(config.output_hidden_states , lowercase__ )
check_encoder_attentions_output(lowercase__ )
if self.is_encoder_decoder:
_snake_case : Union[str, Any] = model_class(lowercase__ )
_snake_case : List[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
self.assertEqual(config.output_hidden_states , lowercase__ )
check_decoder_attentions_output(lowercase__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_snake_case : str = True
_snake_case : Tuple = model_class(lowercase__ )
_snake_case : int = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
self.assertEqual(config.output_hidden_states , lowercase__ )
check_encoder_attentions_output(lowercase__ )
# Check attention is always last and order is fine
_snake_case : int = True
_snake_case : List[str] = True
_snake_case : Tuple = model_class(lowercase__ )
_snake_case : Optional[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase__ ) )
self.assertEqual(model.config.output_hidden_states , lowercase__ )
check_encoder_attentions_output(lowercase__ )
@unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
def UpperCAmelCase_ ( self ) -> int:
"""simple docstring"""
pass
def UpperCAmelCase_ ( self ) -> str:
"""simple docstring"""
pass
def _long_tensor ( lowerCAmelCase_ ):
    """simple docstring"""
    return tf.constant(lowerCAmelCase_ , dtype=tf.int32 )
UpperCAmelCase : Dict = 1E-4
@slow
@require_tf
class lowerCamelCase (unittest.TestCase ):
def UpperCAmelCase_ ( self ) -> Dict:
"""simple docstring"""
_snake_case : List[str] = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
# change to intended input here
_snake_case : List[str] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : Tuple = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : Tuple = prepare_led_inputs_dict(model.config , lowercase__ , lowercase__ )
_snake_case : int = model(**lowercase__ )[0]
_snake_case : Dict = (1, 1_024, 768)
self.assertEqual(output.shape , lowercase__ )
# change to expected output here
_snake_case : List[Any] = tf.convert_to_tensor(
[[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1E-3 )
def UpperCAmelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_snake_case : Any = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
# change to intended input here
_snake_case : Dict = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : Dict = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : List[str] = prepare_led_inputs_dict(model.config , lowercase__ , lowercase__ )
_snake_case : Tuple = model(**lowercase__ )[0]
_snake_case : Any = (1, 1_024, model.config.vocab_size)
self.assertEqual(output.shape , lowercase__ )
# change to expected output here
_snake_case : Dict = tf.convert_to_tensor(
[[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1E-3 , rtol=1E-3 )
| 47
| 0
|
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True ):
"""simple docstring"""
model.train()
_snake_case : Optional[int] = model(lowerCAmelCase_ )
_snake_case : Any = F.mse_loss(lowerCAmelCase_ , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(lowerCAmelCase_ )
def _a ( lowerCAmelCase_ , lowerCAmelCase_=False ):
"""simple docstring"""
set_seed(42 )
_snake_case : Optional[Any] = RegressionModel()
_snake_case : Dict = deepcopy(lowerCAmelCase_ )
_snake_case : Union[str, Any] = RegressionDataset(length=80 )
_snake_case : List[Any] = DataLoader(lowerCAmelCase_ , batch_size=16 )
model.to(accelerator.device )
if sched:
_snake_case : List[str] = AdamW(params=model.parameters() , lr=1E-3 )
_snake_case : str = AdamW(params=ddp_model.parameters() , lr=1E-3 )
_snake_case : str = LambdaLR(lowerCAmelCase_ , lr_lambda=lambda lowerCAmelCase_ : epoch**0.65 )
_snake_case : Any = LambdaLR(lowerCAmelCase_ , lr_lambda=lambda lowerCAmelCase_ : epoch**0.65 )
# Make a copy of `model`
if sched:
_snake_case : str = accelerator.prepare(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
_snake_case : str = accelerator.prepare(lowerCAmelCase_ , lowerCAmelCase_ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Dict = get_training_setup(lowerCAmelCase_ )
# Use a single batch
_snake_case : Any = next(iter(lowerCAmelCase_ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
_snake_case : int = accelerator.gather((ddp_input, ddp_target) )
_snake_case : Dict = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCAmelCase_ ):
step_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
# Sync grads
step_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
_snake_case : Dict = ddp_input[torch.randperm(len(lowerCAmelCase_ ) )]
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : int = get_training_setup(lowerCAmelCase_ )
# Use a single batch
_snake_case : int = next(iter(lowerCAmelCase_ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
_snake_case : Optional[int] = accelerator.gather((ddp_input, ddp_target) )
_snake_case : Dict = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCAmelCase_ ):
step_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
# Sync grads
step_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
_snake_case : List[Any] = ddp_input[torch.randperm(len(lowerCAmelCase_ ) )]
def _a ( lowerCAmelCase_=False , lowerCAmelCase_=False ):
"""simple docstring"""
_snake_case : Any = Accelerator(
split_batches=lowerCAmelCase_ , dispatch_batches=lowerCAmelCase_ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
_snake_case : Union[str, Any] = get_training_setup(lowerCAmelCase_ )
for iteration, batch in enumerate(lowerCAmelCase_ ):
_snake_case : Union[str, Any] = batch.values()
# Gather the distributed inputs and targs for the base model
_snake_case : List[Any] = accelerator.gather((ddp_input, ddp_target) )
_snake_case : Dict = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(lowerCAmelCase_ ):
step_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(lowerCAmelCase_ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
_snake_case : str = ddp_input[torch.randperm(len(lowerCAmelCase_ ) )]
GradientState._reset_state()
def _a ( lowerCAmelCase_=False , lowerCAmelCase_=False ):
"""simple docstring"""
_snake_case : Union[str, Any] = Accelerator(
split_batches=lowerCAmelCase_ , dispatch_batches=lowerCAmelCase_ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
_snake_case : int = get_training_setup(lowerCAmelCase_ , lowerCAmelCase_ )
for iteration, batch in enumerate(lowerCAmelCase_ ):
_snake_case : Dict = batch.values()
# Gather the distributed inputs and targs for the base model
_snake_case : Any = accelerator.gather((ddp_input, ddp_target) )
_snake_case : List[Any] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowerCAmelCase_ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(lowerCAmelCase_ ):
step_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
_snake_case : Optional[Any] = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowerCAmelCase_ ))
if accelerator.num_processes > 1:
check_model_parameters(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
GradientState._reset_state()
def _a ( ):
"""simple docstring"""
_snake_case : int = Accelerator()
_snake_case : Optional[int] = RegressionDataset(length=80 )
_snake_case : Tuple = DataLoader(lowerCAmelCase_ , batch_size=16 )
_snake_case : Optional[int] = RegressionDataset(length=96 )
_snake_case : Tuple = DataLoader(lowerCAmelCase_ , batch_size=16 )
_snake_case : Optional[int] = accelerator.prepare(lowerCAmelCase_ , lowerCAmelCase_ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(lowerCAmelCase_ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCAmelCase_ )
if iteration < len(lowerCAmelCase_ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(lowerCAmelCase_ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCAmelCase_ )
if batch_num < len(lowerCAmelCase_ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def _a ( ):
"""simple docstring"""
_snake_case : Dict = Accelerator()
_snake_case : Optional[Any] = accelerator.state
if state.local_process_index == 0:
print('''**Test `accumulate` gradient accumulation with dataloader break**''' )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print('''**Test NOOP `no_sync` context manager**''' )
test_noop_sync(lowerCAmelCase_ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print('''**Test Distributed `no_sync` context manager**''' )
test_distributed_sync(lowerCAmelCase_ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation, ''' , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(lowerCAmelCase_ , lowerCAmelCase_ )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version('''<''' , '''2.0''' ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , '''`split_batches=False`, `dispatch_batches=False`**''' , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(lowerCAmelCase_ , lowerCAmelCase_ )
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
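# Hedged invocation sketch (the script name is illustrative): this module is
# meant to be launched once per process, e.g.
#
#   accelerate launch --num_processes 2 test_sync.py
#
# so that the DistributedType branches inside main() above are exercised.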
| 717
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase : List[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase : Any = {
'tokenizer_file': {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
},
}
UpperCAmelCase : Optional[Any] = {
'gpt-neox-20b': 2_0_4_8,
}
class lowerCamelCase (a__ ):
_lowercase : Optional[int] = VOCAB_FILES_NAMES
_lowercase : str = PRETRAINED_VOCAB_FILES_MAP
_lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : Optional[int] = ["""input_ids""", """attention_mask"""]
def __init__( self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__="<|endoftext|>" , lowercase__="<|endoftext|>" , lowercase__="<|endoftext|>" , lowercase__=False , **lowercase__ , ) -> List[Any]:
"""simple docstring"""
super().__init__(
lowercase__ , lowercase__ , tokenizer_file=lowercase__ , unk_token=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , add_prefix_space=lowercase__ , **lowercase__ , )
_snake_case : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , lowercase__ ) != add_prefix_space:
_snake_case : int = getattr(lowercase__ , pre_tok_state.pop('''type''' ) )
_snake_case : int = add_prefix_space
_snake_case : Optional[Any] = pre_tok_class(**lowercase__ )
_snake_case : List[str] = add_prefix_space
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None ) -> Tuple[str]:
"""simple docstring"""
_snake_case : Optional[int] = self._tokenizer.model.save(lowercase__ , name=lowercase__ )
return tuple(lowercase__ )
def UpperCAmelCase_ ( self , lowercase__ ) -> List[int]:
"""simple docstring"""
_snake_case : List[str] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowercase__ , add_special_tokens=lowercase__ ) + [self.eos_token_id] )
if len(lowercase__ ) > self.model_max_length:
_snake_case : Dict = input_ids[-self.model_max_length :]
return input_ids
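# Hedged usage sketch (the checkpoint name comes from the pretrained map above;
# the class keeps its obfuscated name from this dump):
#
#   tokenizer = lowerCamelCase.from_pretrained("EleutherAI/gpt-neox-20b")
#   tokenizer("Hello world")["input_ids"]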
| 47
| 0
|
'''simple docstring'''
from __future__ import annotations
def make_matrix ( row_size = 4 ):
    """simple docstring"""
    row_size = abs(row_size ) or 4
    return [[1 + x + y * row_size for x in range(row_size )] for y in range(row_size )]
def rotate_90 ( matrix ):
    """simple docstring"""
    return reverse_row(transpose(matrix ) )
    # OR.. transpose(reverse_column(matrix))
def rotate_180 ( matrix ):
    """simple docstring"""
    return reverse_row(reverse_column(matrix ) )
    # OR.. reverse_column(reverse_row(matrix))
def rotate_270 ( matrix ):
    """simple docstring"""
    return reverse_column(transpose(matrix ) )
    # OR.. transpose(reverse_row(matrix))
def transpose ( matrix ):
    """simple docstring"""
    matrix = [list(x ) for x in zip(*matrix )]
    return matrix
def reverse_row ( matrix ):
    """simple docstring"""
    matrix = matrix[::-1]
    return matrix
def reverse_column ( matrix ):
    """simple docstring"""
    matrix = [x[::-1] for x in matrix]
    return matrix
def print_matrix ( matrix ):
    """simple docstring"""
    for i in matrix:
        print(*i )
if __name__ == "__main__":
    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 90 counterclockwise:\n')
    print_matrix(rotate_90(matrix))
    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 180:\n')
    print_matrix(rotate_180(matrix))
    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 270 counterclockwise:\n')
    print_matrix(rotate_270(matrix))
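# Hedged output sketch: with the default 4x4 matrix [[1, 2, 3, 4], [5, 6, 7, 8],
# [9, 10, 11, 12], [13, 14, 15, 16]], the 90-degree rotation above prints
#   4 8 12 16
#   3 7 11 15
#   2 6 10 14
#   1 5 9 13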
| 718
|
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def gamma ( num ):
    """simple docstring"""
    if num <= 0:
        raise ValueError('''math domain error''' )
    return quad(integrand , 0 , inf , args=(num,) )[0]
def integrand ( x , z ):
    """simple docstring"""
    return math.pow(x , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
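# Hedged check (illustrative value): gamma(5) integrates x**4 * exp(-x) over
# [0, inf) and should return roughly 24.0, i.e. 4 factorial.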
| 47
| 0
|
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 719
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class lowerCamelCase (unittest.TestCase ):
@slow
def UpperCAmelCase_ ( self ) -> List[Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_snake_case : Union[str, Any] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Any = TFAutoModel.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : str = AutoModel.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> List[str]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_snake_case : Optional[Any] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Dict = TFAutoModelForPreTraining.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Tuple = AutoModelForPreTraining.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Optional[int] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : List[Any] = TFAutoModelForCausalLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
_snake_case , _snake_case : Tuple = TFAutoModelForCausalLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
_snake_case , _snake_case : Optional[Any] = AutoModelForCausalLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : List[Any] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Tuple = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Any:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
_snake_case , _snake_case : List[str] = TFAutoModelForMaskedLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : int = AutoModelForMaskedLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
_snake_case , _snake_case : Optional[int] = AutoModelForMaskedLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> List[str]:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
_snake_case , _snake_case : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
_snake_case , _snake_case : Dict = AutoModelForSeqaSeqLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Dict:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_snake_case : Any = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Any = TFAutoModelForSequenceClassification.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Dict = AutoModelForSequenceClassification.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_snake_case : str = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : str = TFAutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
def UpperCAmelCase_ ( self ) -> str:
"""simple docstring"""
_snake_case : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
_snake_case : Tuple = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
def UpperCAmelCase_ ( self ) -> str:
"""simple docstring"""
_snake_case : List[str] = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
_snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
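# Hedged cross-loading sketch (mirrors the from_pt/from_tf pattern exercised
# above; downloading "bert-base-uncased" is assumed to be possible):
#
#   tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)
#   pt_model = AutoModel.from_pretrained("bert-base-uncased", from_tf=True)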
| 47
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase : Optional[Any] = 'platform'
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict ( config , input_ids , decoder_input_ids=None , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id , 1 , 0 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class FlaxBlenderbotModelTester:
def __init__( self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=False , lowercase__=99 , lowercase__=16 , lowercase__=2 , lowercase__=4 , lowercase__=4 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=32 , lowercase__=2 , lowercase__=1 , lowercase__=0 , lowercase__=0.02 , ) -> Union[str, Any]:
"""simple docstring"""
_snake_case : Tuple = parent
_snake_case : int = batch_size
_snake_case : List[str] = seq_length
_snake_case : List[str] = is_training
_snake_case : Any = use_labels
_snake_case : Optional[Any] = vocab_size
_snake_case : Tuple = hidden_size
_snake_case : Optional[int] = num_hidden_layers
_snake_case : int = num_attention_heads
_snake_case : Any = intermediate_size
_snake_case : Any = hidden_act
_snake_case : Union[str, Any] = hidden_dropout_prob
_snake_case : str = attention_probs_dropout_prob
_snake_case : int = max_position_embeddings
_snake_case : Optional[int] = eos_token_id
_snake_case : Optional[Any] = pad_token_id
_snake_case : List[Any] = bos_token_id
_snake_case : Any = initializer_range
    def prepare_config_and_inputs( self ) -> List[Any]:
        """simple docstring"""
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64 )) , -1 )
        decoder_input_ids = shift_tokens_right(input_ids , 1 , 2 )
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=False , )
        inputs_dict = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids )
return config, inputs_dict
    def prepare_config_and_inputs_for_common( self ) -> Optional[Any]:
        """simple docstring"""
        config , inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , inputs_dict ) -> Optional[Any]:
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['''input_ids'''] )
        decoder_input_ids , decoder_attention_mask = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , inputs_dict ) -> Tuple:
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['''input_ids'''] )
        decoder_input_ids , decoder_attention_mask = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
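# Illustrative note (not part of the original tests): the two checks above assert
# the standard KV-cache invariant for autoregressive decoding -- feeding the
# prefix once, then decoding only the last token with `past_key_values`, must
# match a single full-sequence decode up to numerical tolerance:
#
#   full = model.decode(decoder_input_ids, encoder_outputs)
#   step = model.decode(decoder_input_ids[:, -1:], encoder_outputs,
#                       past_key_values=cache, decoder_position_ids=last_pos)
#   np.allclose(step[0][:, -1], full[0][:, -1], atol=1e-3)  # expected to hold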
@require_flax
class lowerCamelCase (unittest.TestCase ):
_lowercase : Optional[Any] = 99
    def _get_config_and_data( self ) -> List[Any]:
        """simple docstring"""
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ] , dtype=np.int64 , )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
    def test_lm_forward( self ) -> Optional[Any]:
        """simple docstring"""
        config , input_ids , batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config )
        outputs = lm_model(input_ids=input_ids )
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['''logits'''].shape , expected_shape )
    def test_lm_uneven_forward( self ) -> int:
        """simple docstring"""
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
        lm_model = FlaxBlenderbotForConditionalGeneration(config )
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.int64 )
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.int64 )
        outputs = lm_model(input_ids=context , decoder_input_ids=summary )
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['''logits'''].shape , expected_shape )
    def test_shift_tokens_right( self ) -> Any:
        """simple docstring"""
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.int64 )
        shifted = shift_tokens_right(input_ids , 1 , 2 )
        n_pad_before = np.equal(input_ids , 1 ).astype(np.float32 ).sum()
        n_pad_after = np.equal(shifted , 1 ).astype(np.float32 ).sum()
        self.assertEqual(shifted.shape , input_ids.shape )
        self.assertEqual(n_pad_after , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
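    # Illustrative example (not part of the original test): with pad_token_id=1
    # and decoder_start_token_id=2, shift_tokens_right maps
    #   [71, 82, 18, 33, 2, 1, 1]  ->  [2, 71, 82, 18, 33, 2, 1]
    # i.e. every token moves one slot to the right, the start token fills
    # position 0, and exactly one trailing pad is dropped (hence n_pad_before - 1).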
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
_lowercase : Optional[Any] = True
_lowercase : List[str] = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
_lowercase : Tuple = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp( self ) -> Tuple:
        """simple docstring"""
        self.model_tester = FlaxBlenderbotModelTester(self )
    def test_use_cache_forward( self ) -> List[str]:
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )
    def test_use_cache_forward_with_attn_mask( self ) -> Optional[int]:
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
    def test_encode( self ) -> Optional[Any]:
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def encode_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model.encode(input_ids=input_ids , attention_mask=attention_mask )
                with self.subTest('''JIT Enabled''' ):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def test_decode( self ) -> List[Any]:
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                model = model_class(config )
                encoder_outputs = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
                prepared_inputs_dict = {
                    '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
                    '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
                    '''encoder_outputs''': encoder_outputs,
                }
                @jax.jit
                def decode_jitted(decoder_input_ids , decoder_attention_mask , encoder_outputs ):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )
                with self.subTest('''JIT Enabled''' ):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
@slow
    def test_model_from_pretrained( self ) -> Tuple:
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1) ) * model.config.eos_token_id
            outputs = model(input_ids )
            self.assertIsNotNone(outputs )
    @unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
    @slow
    def test_generation_from_short_input_same_as_parlai_3B( self ) -> List[str]:
        """simple docstring"""
        FASTER_GEN_KWARGS = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25}
        TOK_DECODE_KW = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=True )
        tokenizer = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
        src_text = ['''Sam''']
        model_inputs = tokenizer(src_text , return_tensors='''jax''' )
        generated_utterances = model.generate(**model_inputs , **FASTER_GEN_KWARGS )
        tgt_text = '''Sam is a great name. It means "sun" in Gaelic.'''
        generated_txt = tokenizer.batch_decode(generated_utterances , **TOK_DECODE_KW )
assert generated_txt[0].strip() == tgt_text
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_timm_backbone': ['TimmBackboneConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_timm_backbone'] = ['TimmBackbone']
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean( input_a , input_b ):
    """simple docstring"""
    return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(input_a , input_b ) ) )
def similarity_search( dataset , value_array ):
    """simple docstring"""
    if dataset.ndim != value_array.ndim:
        msg = (
            '''Wrong input data\'s dimensions... '''
            f'''dataset : {dataset.ndim}, value_array : {value_array.ndim}'''
        )
        raise ValueError(msg )
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                '''Wrong input data\'s shape... '''
                f'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'''
            )
            raise ValueError(msg )
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError('''Wrong shape''' )
    if dataset.dtype != value_array.dtype:
        msg = (
            '''Input data have different datatype... '''
            f'''dataset : {dataset.dtype}, value_array : {value_array.dtype}'''
        )
        raise TypeError(msg )
    answer = []
    for value in value_array:
        dist = euclidean(value , dataset[0] )
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value , dataset_value )
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist] )
    return answer
def cosine_similarity( input_a , input_b ):
    """simple docstring"""
    return np.dot(input_a , input_b ) / (norm(input_a ) * norm(input_b ))
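# Illustrative usage sketch (added for clarity; not in the original module):
#
#   dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
#   value_array = np.array([[0.0, 1.0]])
#   similarity_search(dataset, value_array)  # -> [[[0.0, 0.0], 1.0]]
#   cosine_similarity(np.array([1.0, 1.0]), np.array([1.0, 0.0]))  # ~0.7071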
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
UpperCAmelCase : Tuple = logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')
MODEL_MODES = {
'base': AutoModel,
'sequence-classification': AutoModelForSequenceClassification,
'question-answering': AutoModelForQuestionAnswering,
'pretraining': AutoModelForPreTraining,
'token-classification': AutoModelForTokenClassification,
'language-modeling': AutoModelWithLMHead,
'summarization': AutoModelForSeqaSeqLM,
'translation': AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = '{' + ', '.join(arg_to_scheduler_choices) + '}'
class BaseTransformer(pl.LightningModule ):
    def __init__( self , hparams , num_labels=None , mode="base" , config=None , tokenizer=None , model=None , **config_kwargs , ) -> Optional[int]:
        """simple docstring"""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams )
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir )
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=cache_dir , **config_kwargs , )
        else:
            self.config : PretrainedConfig = config
        extra_model_params = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
        for p in extra_model_params:
            if getattr(self.hparams , p , None ):
                assert hasattr(self.config , p ), F'''model config doesn\'t have a `{p}` attribute'''
                setattr(self.config , p , getattr(self.hparams , p ) )
        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=cache_dir , )
        else:
            self.tokenizer : PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=cache_dir , )
        else:
            self.model = model
    def load_hf_checkpoint( self , *args , **kwargs ) -> List[str]:
        """simple docstring"""
        self.model = self.model_type.from_pretrained(*args , **kwargs )
    def get_lr_scheduler( self ) -> List[Any]:
        """simple docstring"""
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
        scheduler = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1}
return scheduler
    def configure_optimizers( self ) -> int:
        """simple docstring"""
        model = self.model
        no_decay = ['''bias''', '''LayerNorm.weight''']
        optimizer_grouped_parameters = [
{
'''params''': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check this named paramters
'''weight_decay''': self.hparams.weight_decay,
},
{
'''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'''weight_decay''': 0.0,
},
]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters , lr=self.hparams.learning_rate , scale_parameter=False , relative_step=False )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
return [optimizer], [scheduler]
    def test_step( self , batch , batch_nb ) -> Any:
        """simple docstring"""
        return self.validation_step(batch , batch_nb )
    def test_epoch_end( self , outputs ) -> Tuple:
        """simple docstring"""
        return self.validation_end(outputs )
    def total_steps( self ) -> int:
        """simple docstring"""
        num_devices = max(1 , self.hparams.gpus )  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
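    # Worked example (illustrative, not in the original file): with
    # train_batch_size=32, accumulate_grad_batches=2 and gpus=4, the effective
    # batch size is 256; for dataset_size=10_000 and max_epochs=3 this yields
    # (10_000 / 256) * 3 ~= 117 total optimization steps.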
    def setup( self , stage ) -> Any:
        """simple docstring"""
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset )
        else:
            self.train_loader = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=True )
            self.dataset_size = len(self.train_dataloader().dataset )
    def get_dataloader( self , type_path , batch_size , shuffle = False ) -> str:
"""simple docstring"""
raise NotImplementedError('''You must implement this for your task''' )
    def train_dataloader( self ) -> Optional[int]:
        """simple docstring"""
        return self.train_loader
    def val_dataloader( self ) -> Dict:
        """simple docstring"""
        return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=False )
    def test_dataloader( self ) -> Optional[Any]:
        """simple docstring"""
        return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=False )
    def _feature_file( self , mode ) -> Optional[int]:
        """simple docstring"""
        return os.path.join(
            self.hparams.data_dir , '''cached_{}_{}_{}'''.format(
                mode , list(filter(None , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
    def on_save_checkpoint( self , checkpoint ) -> None:
        """simple docstring"""
        save_path = self.output_dir.joinpath('''best_tfmr''' )
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path )
        self.tokenizer.save_pretrained(save_path )
@staticmethod
    def add_model_specific_args( parser , root_dir ) -> Tuple:
        """simple docstring"""
        parser.add_argument(
            '''--model_name_or_path''' , default=None , type=str , required=True , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
        parser.add_argument(
            '''--config_name''' , default='''''' , type=str , help='''Pretrained config name or path if not the same as model_name''' )
        parser.add_argument(
            '''--tokenizer_name''' , default=None , type=str , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
        parser.add_argument(
            '''--cache_dir''' , default=str(Path(__file__ ).parent / '''test_run''' / '''cache''' ) , type=str , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , )
        parser.add_argument(
            '''--encoder_layerdrop''' , type=float , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , )
        parser.add_argument(
            '''--decoder_layerdrop''' , type=float , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , )
        parser.add_argument(
            '''--dropout''' , type=float , help='''Dropout probability (Optional). Goes into model.config''' , )
        parser.add_argument(
            '''--attention_dropout''' , type=float , help='''Attention dropout probability (Optional). Goes into model.config''' , )
        parser.add_argument('''--learning_rate''' , default=5E-5 , type=float , help='''The initial learning rate for Adam.''' )
        parser.add_argument(
            '''--lr_scheduler''' , default='''linear''' , choices=arg_to_scheduler_choices , metavar=arg_to_scheduler_metavar , type=str , help='''Learning rate scheduler''' , )
        parser.add_argument('''--weight_decay''' , default=0.0 , type=float , help='''Weight decay if we apply some.''' )
        parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=float , help='''Epsilon for Adam optimizer.''' )
        parser.add_argument('''--warmup_steps''' , default=0 , type=int , help='''Linear warmup over warmup_steps.''' )
        parser.add_argument('''--num_workers''' , default=4 , type=int , help='''kwarg passed to DataLoader''' )
        parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=int )
        parser.add_argument('''--train_batch_size''' , default=32 , type=int )
        parser.add_argument('''--eval_batch_size''' , default=32 , type=int )
        parser.add_argument('''--adafactor''' , action='''store_true''' )
class InitCallback(pl.Callback ):
    def on_sanity_check_start( self , trainer , pl_module ) -> str:
        """simple docstring"""
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.
class CheckParamCallback(pl.Callback ):
    def on_after_backward( self , trainer , pl_module ) -> List[str]:
        """simple docstring"""
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name )
class LoggingCallback(pl.Callback ):
    def on_batch_end( self , trainer , pl_module ) -> Any:
        """simple docstring"""
        lr_scheduler = trainer.lr_schedulers[0]['''scheduler''']
        lrs = {F'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
        pl_module.logger.log_metrics(lrs )
    def on_validation_end( self , trainer , pl_module ) -> List[str]:
        """simple docstring"""
        rank_zero_info('''***** Validation results *****''' )
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics ):
            if key not in ["log", "progress_bar"]:
                rank_zero_info('''{} = {}\n'''.format(key , str(metrics[key] ) ) )
    def on_test_end( self , trainer , pl_module ) -> Dict:
        """simple docstring"""
        rank_zero_info('''***** Test results *****''' )
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' )
        with open(output_test_results_file , '''w''' ) as writer:
            for key in sorted(metrics ):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info('''{} = {}\n'''.format(key , str(metrics[key] ) ) )
                    writer.write('''{} = {}\n'''.format(key , str(metrics[key] ) ) )
def add_generic_args( parser , root_dir ):
    """simple docstring"""
    parser.add_argument(
        '''--output_dir''' , default=str(Path(__file__ ).parent / '''test_run''' / '''model_checkpoints''' ) , type=str , help='''The output directory where the model predictions and checkpoints will be written.''' , )
    parser.add_argument(
        '''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
    parser.add_argument(
        '''--fp16_opt_level''' , type=str , default='''O2''' , help=(
            '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
            '''See details at https://nvidia.github.io/apex/amp.html'''
        ) , )
    parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=int )
    parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=float , help='''Max gradient norm''' )
    parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
    parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' )
    parser.add_argument(
        '''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=int , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
    parser.add_argument('''--seed''' , type=int , default=42 , help='''random seed for initialization''' )
    parser.add_argument(
        '''--data_dir''' , default=str(Path(__file__ ).parent / '''test_run''' / '''dummy-train-data''' ) , type=str , help='''The input data dir. Should contain the training files for the CoNLL-2003 NER task.''' , )
def generic_train( model , args , early_stopping_callback=None , logger=True , extra_callbacks=[] , checkpoint_callback=None , logging_callback=None , **extra_train_kwargs , ):
    """simple docstring"""
    pl.seed_everything(args.seed )
    # init model
    odir = Path(model.hparams.output_dir )
    odir.mkdir(exist_ok=True )
    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback )
    if logging_callback is None:
        logging_callback = LoggingCallback()
    train_params = {}
    # NOTE: the train_params keys below are reconstructed from the surviving
    # values in this file and from the upstream lightning_base.py; best guess.
    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = '''auto'''
        train_params["strategy"] = '''ddp'''
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = '''auto'''
    trainer = pl.Trainer.from_argparse_args(
        args , weights_summary=None , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=logger , val_check_interval=1 , num_sanity_val_steps=2 , **train_params , )
    if args.do_train:
        trainer.fit(model )
    else:
        print('''RAG modeling tests with new set functions successfuly executed!''' )
    return trainer
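# Illustrative wiring sketch (hypothetical subclass name, not part of this file):
#
#   parser = argparse.ArgumentParser()
#   add_generic_args(parser, os.getcwd())
#   MyTaskModule.add_model_specific_args(parser, os.getcwd())  # hypothetical BaseTransformer subclass
#   args = parser.parse_args()
#   model = MyTaskModule(args)
#   trainer = generic_train(model, args)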
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = 'hf-internal-testing/tiny-random-bert'
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert')
FULL_COMMIT_HASH = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6'
class GetFromCacheTests(unittest.TestCase ):
    def test_cached_file( self ) -> Dict:
        """simple docstring"""
        archive_file = cached_file(RANDOM_BERT , CONFIG_NAME )
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR ) )
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR , subfolder ) ) )
        with open(os.path.join(CACHE_DIR , '''refs''' , '''main''' ) ) as f:
            main_commit = f.read()
        self.assertEqual(archive_file , os.path.join(CACHE_DIR , '''snapshots''' , main_commit , CONFIG_NAME ) )
        self.assertTrue(os.path.isfile(archive_file ) )
        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT , CONFIG_NAME )
        self.assertEqual(archive_file , new_archive_file )
        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT , CONFIG_NAME , revision='''9b8c223''' )
        self.assertEqual(archive_file , os.path.join(CACHE_DIR , '''snapshots''' , FULL_COMMIT_HASH , CONFIG_NAME ) )
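    # For reference (not part of the original test): the hub cache for one repo
    # is laid out roughly as
    #   models--hf-internal-testing--tiny-random-bert/
    #       blobs/<sha>                     # content-addressed file payloads
    #       refs/main                       # text file holding a commit hash
    #       snapshots/<commit>/config.json  # per-revision views into blobs/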
    def test_cached_file_errors( self ) -> Union[str, Any]:
        """simple docstring"""
        with self.assertRaisesRegex(EnvironmentError , '''is not a valid model identifier''' ):
            _ = cached_file('''tiny-random-bert''' , CONFIG_NAME )
        with self.assertRaisesRegex(EnvironmentError , '''is not a valid git identifier''' ):
            _ = cached_file(RANDOM_BERT , CONFIG_NAME , revision='''aaaa''' )
        with self.assertRaisesRegex(EnvironmentError , '''does not appear to have a file named''' ):
            _ = cached_file(RANDOM_BERT , '''conf''' )
    def test_non_existence_is_cached( self ) -> Union[str, Any]:
        """simple docstring"""
        with self.assertRaisesRegex(EnvironmentError , '''does not appear to have a file named''' ):
            _ = cached_file(RANDOM_BERT , '''conf''' )
        with open(os.path.join(CACHE_DIR , '''refs''' , '''main''' ) ) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR , '''.no_exist''' , main_commit , '''conf''' ) ) )
        path = cached_file(RANDOM_BERT , '''conf''' , _raise_exceptions_for_missing_entries=False )
        self.assertIsNone(path )
        path = cached_file(RANDOM_BERT , '''conf''' , local_files_only=True , _raise_exceptions_for_missing_entries=False )
        self.assertIsNone(path )
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('''requests.Session.request''' , return_value=response_mock ) as mock_head:
            path = cached_file(RANDOM_BERT , '''conf''' , _raise_exceptions_for_connection_errors=False )
            self.assertIsNone(path )
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_has_file( self ) -> Optional[Any]:
        """simple docstring"""
        self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , WEIGHTS_NAME ) )
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , TF2_WEIGHTS_NAME ) )
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , FLAX_WEIGHTS_NAME ) )
    def test_get_file_from_repo_distant( self ) -> str:
        """simple docstring"""
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError , '''is not a valid model identifier''' ):
            get_file_from_repo('''bert-base-case''' , CONFIG_NAME )
        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError , '''is not a valid git identifier''' ):
            get_file_from_repo('''bert-base-cased''' , CONFIG_NAME , revision='''ahaha''' )
        resolved_file = get_file_from_repo('''bert-base-cased''' , CONFIG_NAME )
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file , '''r''' ).read() )
        self.assertEqual(config['''hidden_size'''] , 768 )
    def test_get_file_from_repo_local( self ) -> List[str]:
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir ) / '''a.txt'''
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir , '''a.txt''' ) , str(filename ) )
            self.assertIsNone(get_file_from_repo(tmp_dir , '''b.txt''' ) )
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : Dict = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig ):
    model_type = """sew-d"""
    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , squeeze_factor=2 , max_position_embeddings=512 , position_buckets=256 , share_att_key=True , relative_attention=True , pos_att_type=("p2c", "c2p") , norm_rel_ebd="layer_norm" , hidden_act="gelu_python" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , initializer_range=0.02 , layer_norm_eps=1E-7 , feature_layer_norm_eps=1E-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , **kwargs , ) -> Dict:
        """simple docstring"""
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type )
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
# ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
# sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
@property
    def inputs_to_logits_ratio( self ) -> Any:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
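# Worked example (illustrative, not in the original config): with the default
# conv_stride of (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) the product is
# 5 * 2**6 = 320, i.e. one output frame per 320 input samples (20 ms at 16 kHz).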
'''simple docstring'''
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter( formatter_cls , format_type , aliases = None , ):
    """simple docstring"""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f'''Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''' )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f'''Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''' )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter( unavailable_error , format_type , aliases = None ):
    """simple docstring"""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def get_format_type_from_alias( format_type ):
"""simple docstring"""
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def get_formatter( format_type , **format_kwargs ):
    """simple docstring"""
    format_type = get_format_type_from_alias(format_type )
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
f'''Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'''' )
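# Illustrative usage sketch (not part of the original module):
#
#   get_format_type_from_alias("np")     # -> "numpy"
#   formatter = get_formatter("numpy")   # -> NumpyFormatter instance
#   get_formatter("torch")               # raises the stored error if PyTorch is absent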
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort( a , start , end ):
    """simple docstring"""
    count = 0
    if start < end:
        pivot = randint(start , end )
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p , count = _in_place_partition(a , start , end )
        count += _in_place_quick_sort(a , start , p - 1 )
        count += _in_place_quick_sort(a , p + 1 , end )
return count
def _in_place_partition( a , start , end ):
    """simple docstring"""
    count = 0
    pivot = randint(start , end )
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start , end ):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 1_0_0  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)
outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
'No of Comparisons for 100 elements selected from a standard normal distribution'
'is :'
)
print(z)
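# Illustrative sketch (not part of the original script): the same routine on a
# small fixed array.
#
#   a = [3.0, 1.0, 2.0]
#   comparisons = _in_place_quick_sort(a, 0, len(a) - 1)
#   # a is now [1.0, 2.0, 3.0]; `comparisons` counts pivot comparisons made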
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def _gelu( x ):
    """simple docstring"""
    x = tf.convert_to_tensor(x )
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
    return x * cdf
def _gelu_new( x ):
    """simple docstring"""
    x = tf.convert_to_tensor(x )
    pi = tf.cast(math.pi , x.dtype )
    coeff = tf.cast(0.044_715 , x.dtype )
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(x , 3 )) ))
    return x * cdf
def mish( x ):
    """simple docstring"""
    x = tf.convert_to_tensor(x )
    return x * tf.tanh(tf.math.softplus(x ) )
def gelu_fast( x ):
    """simple docstring"""
    x = tf.convert_to_tensor(x )
    coeffa = tf.cast(0.044_715 , x.dtype )
    coeffb = tf.cast(0.7_978_845_608 , x.dtype )
    return 0.5 * x * (1.0 + tf.tanh(x * coeffb * (1.0 + coeffa * x * x) ))
def quick_gelu( x ):
    """simple docstring"""
    x = tf.convert_to_tensor(x )
    coeff = tf.cast(1.702 , x.dtype )
    return x * tf.math.sigmoid(coeff * x )
def gelu_aa( x ):
    """simple docstring"""
    return tf.clip_by_value(_gelu(x ) , -10 , 10 )
def glu( x , axis=-1 ):
    """simple docstring"""
    a , b = tf.split(x , 2 , axis=axis )
    return a * tf.math.sigmoid(b )
if version.parse(tf.version.VERSION) >= version.parse('2.4'):
    def approximate_gelu_wrap( x ):
        """simple docstring"""
        return tf.keras.activations.gelu(x , approximate=True )
    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new
UpperCAmelCase : int = {
'gelu': gelu,
'gelu_10': gelu_aa,
'gelu_fast': gelu_fast,
'gelu_new': gelu_new,
'glu': glu,
'mish': mish,
'quick_gelu': quick_gelu,
'relu': tf.keras.activations.relu,
'sigmoid': tf.keras.activations.sigmoid,
'silu': tf.keras.activations.swish,
'swish': tf.keras.activations.swish,
'tanh': tf.keras.activations.tanh,
}
def get_tf_activation( activation_string ):
"""simple docstring"""
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(f'''function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}''' )
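# Illustrative usage sketch (not part of the original module):
#
#   act_fn = get_tf_activation("gelu_fast")      # resolve by config string
#   y = act_fn(tf.constant([-1.0, 0.0, 1.0]))    # elementwise activation
#
# Unknown names raise KeyError listing the supported keys above.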
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase : Dict = logging.get_logger(__name__)
rename_keys_prefix = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
ACCEPTABLE_CHECKPOINTS = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def load_state_dict( checkpoint_path ):
    """simple docstring"""
    sd = torch.load(checkpoint_path , map_location='''cpu''' )
return sd
def get_new_dict( d , config , rename_keys_prefix=rename_keys_prefix ):
    """simple docstring"""
    new_d = OrderedDict()
    new_d['''visual_bert.embeddings.position_ids'''] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0] , name_pair[1] )
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d['''cls.predictions.decoder.bias'''] = new_d['''cls.predictions.bias''']
    return new_d
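# Illustrative example of the prefix renaming above (not part of the original
# script): "bert.bert.encoder.layer.0.attention.self.query.weight" becomes
# "visual_bert.encoder.layer.0.attention.self.query.weight".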
@torch.no_grad()
def convert_visual_bert_checkpoint( checkpoint_path , pytorch_dump_folder_path ):
"""simple docstring"""
assert (
checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS
), f'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
        model_type = '''pretraining'''
        if "vcr" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 2_048}
        elif "vqa" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 2_048}
        elif "nlvr" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 1_024}
        else:
            raise NotImplementedError(f'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
_snake_case : str = {'''visual_embedding_dim''': 512}
_snake_case : Optional[int] = '''multichoice'''
elif "vqa_advanced" in checkpoint_path:
_snake_case : int = {'''visual_embedding_dim''': 2_048}
_snake_case : Dict = '''vqa_advanced'''
elif "vqa" in checkpoint_path:
_snake_case : Optional[int] = {'''visual_embedding_dim''': 2_048, '''num_labels''': 3_129}
_snake_case : Dict = '''vqa'''
elif "nlvr" in checkpoint_path:
_snake_case : int = {
'''visual_embedding_dim''': 1_024,
'''num_labels''': 2,
}
_snake_case : Optional[Any] = '''nlvr'''
_snake_case : str = VisualBertConfig(**lowerCAmelCase_ )
# Load State Dict
    state_dict = load_state_dict(checkpoint_path )
    new_state_dict = get_new_dict(state_dict , config )
    if model_type == "pretraining":
        model = VisualBertForPreTraining(config )
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config )
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config )
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config )
    model.load_state_dict(new_state_dict )
    # Save Checkpoints
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
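# Illustrative CLI usage (hypothetical file names; the converter script's own
# file name is an assumption):
#
#   python convert_visual_bert_checkpoint.py vqa_fine_tuned.th ./visualbert-vqa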
if __name__ == "__main__":
UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
UpperCAmelCase : int = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    """simple docstring"""
    parser = ArgumentParser('''Transformers CLI tool''' , usage='''transformers-cli <command> [<args>]''' )
    commands_parser = parser.add_subparsers(help='''transformers-cli command helpers''' )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
service.run()
if __name__ == "__main__":
main()
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('0.8.3'):
raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
raise Exception('requires mxnet == 1.5.0')
logging.set_verbosity_info()
UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase : Tuple = 'The Nymphenburg Palace is a beautiful palace in Munich!'
def convert_bort_checkpoint_to_pytorch( bort_checkpoint_path , pytorch_dump_folder_path ):
    """simple docstring"""
    bort_4_8_768_1024_hparams = {
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 1_024,
'''hidden_size''': 768,
'''max_length''': 512,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 1_024,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1E-5,
'''token_type_vocab_size''': 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=False , output_all_encodings=False , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , None ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = '''openwebtext_ccnews_stories_books_cased'''
    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir() , '''models''' )
    vocab = _load_vocab(vocab_name , None , gluon_cache_dir , cls=Vocab )
    original_bort = nlp.model.BERTModel(
        encoder , len(vocab ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=False , use_token_type_embed=False , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=False , use_decoder=False , )
    original_bort.load_parameters(bort_checkpoint_path , cast_dtype=True , ignore_extra=True )
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
'''architectures''': ['''BertForMaskedLM'''],
'''attention_probs_dropout_prob''': predefined_args['''dropout'''],
'''hidden_act''': '''gelu''',
'''hidden_dropout_prob''': predefined_args['''dropout'''],
'''hidden_size''': predefined_args['''embed_size'''],
'''initializer_range''': 0.02,
'''intermediate_size''': predefined_args['''hidden_size'''],
'''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
'''max_position_embeddings''': predefined_args['''max_length'''],
'''model_type''': '''bort''',
'''num_attention_heads''': predefined_args['''num_heads'''],
'''num_hidden_layers''': predefined_args['''num_layers'''],
'''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
        '''vocab_size''': len(vocab ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json )
    hf_bort_model = BertForMaskedLM(hf_bort_config )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param , gluon_param ):
_snake_case : Dict = hf_param.shape
_snake_case : Union[str, Any] = to_torch(params[gluon_param] )
_snake_case : str = gluon_param.shape
assert (
shape_hf == shape_gluon
), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
        layer : BertLayer = hf_bort_model.bert.encoder.layer[i]
        # self attention
        self_attn : BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
        # self attention output
        self_output : BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
        # intermediate
        intermediate : BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
        # output
        bert_output : BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained('''roberta-base''' )
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT )['''input_ids''']
    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids] )
    output_gluon = original_bort(inputs=gluon_input_ids , token_types=[] )
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path )
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path )
    hf_bort_model.eval()
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT , return_tensors='''pt''' )
    output_hf = hf_bort_model(**input_ids )[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    success = np.allclose(gluon_layer , hf_layer , atol=1E-3 )
if success:
        print('''✔️ Both models output the same tensors''' )
    else:
        print('''❌ Both models do **NOT** output the same tensors''' )
        print('''Absolute difference is:''' , max_absolute_diff )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bort_checkpoint_path', default=None, type=str, required=True, help='Path the official Bort params file.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
'''simple docstring'''
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    """simple docstring"""
    a , b = 0, 1
    while True:
        a , b = b, a + b
        yield b
def solution(n = 1_000 ):
    """simple docstring"""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen ) ) ) < n:
        answer += 1
    return answer + 1
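# Illustrative check (added, not part of the original file): the first
# Fibonacci term with three digits is 144, the 12th term of the sequence,
# so solution(3) returns 12.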
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class lowerCamelCase (unittest.TestCase ):
    def _get_tensors( self , length ):
        """simple docstring"""
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length) , vocab_size )
        scores = torch.ones((batch_size, length) , device=torch_device , dtype=torch.float ) / length
        return input_ids, scores
    def test_list_criteria( self ):
        """simple docstring"""
        input_ids , scores = self._get_tensors(5 )
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10 ),
                MaxTimeCriteria(max_time=0.1 ),
            ] )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_max_length_criteria( self ):
        """simple docstring"""
        criteria = MaxLengthCriteria(max_length=10 )
        input_ids , scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_max_new_tokens_criteria( self ):
        """simple docstring"""
        criteria = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
        input_ids , scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
        criteria_list = StoppingCriteriaList([criteria] )
        self.assertEqual(criteria_list.max_length , 10 )
    def test_max_time_criteria( self ):
        """simple docstring"""
        input_ids , scores = self._get_tensors(5 )
        criteria = MaxTimeCriteria(max_time=0.1 )
        self.assertFalse(criteria(input_ids , scores ) )
        criteria = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_validate_stopping_criteria( self ):
        """simple docstring"""
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
        with self.assertWarns(UserWarning ):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList() , 11 )
        self.assertEqual(len(stopping_criteria ) , 1 )
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 5_0 # max width of layer names
qname_width = 7_0 # max width of quantizer names
def add_arguments(parser ):
    """simple docstring"""
    group = parser.add_argument_group('''quant_trainer arguments''' )
group.add_argument('''--wprec''' , type=lowerCAmelCase_ , default=8 , help='''weight precision''' )
group.add_argument('''--aprec''' , type=lowerCAmelCase_ , default=8 , help='''activation precision''' )
group.add_argument('''--quant-per-tensor''' , action='''store_true''' , help='''per tensor weight scaling''' )
group.add_argument('''--quant-disable''' , action='''store_true''' , help='''disable all quantizers''' )
group.add_argument('''--quant-disable-embeddings''' , action='''store_true''' , help='''disable all embeddings quantizers''' )
group.add_argument('''--quant-disable-keyword''' , type=lowerCAmelCase_ , nargs='''+''' , help='''disable quantizers by keyword''' )
group.add_argument('''--quant-disable-layer-module''' , type=lowerCAmelCase_ , help='''disable quantizers by keyword under layer.''' )
group.add_argument('''--quant-enable-layer-module''' , type=lowerCAmelCase_ , help='''enable quantizers by keyword under layer''' )
group.add_argument('''--calibrator''' , default='''max''' , help='''which quantization range calibrator to use''' )
group.add_argument('''--percentile''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , help='''percentile for PercentileCalibrator''' )
group.add_argument('''--fuse-qkv''' , action='''store_true''' , help='''use the same scale factor for qkv''' )
group.add_argument('''--clip-gelu''' , metavar='''N''' , type=lowerCAmelCase_ , help='''clip gelu output maximum value to N''' )
group.add_argument(
'''--recalibrate-weights''' , action='''store_true''' , help=(
'''recalibrate weight amaxes by taking the max of the weights.'''
''' amaxes will be computed with the current quantization granularity (axis).'''
) , )
def set_default_quantizers(args ):
    """simple docstring"""
    if args.calibrator == "max":
        calib_method = '''max'''
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError('''Specify --percentile when using percentile calibrator''' )
        calib_method = '''histogram'''
    elif args.calibrator == "mse":
        calib_method = '''histogram'''
    else:
        raise ValueError(f'''Invalid calibrator {args.calibrator}''' )
    input_desc = QuantDescriptor(num_bits=args.aprec , calib_method=calib_method )
    weight_desc = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc )
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc )
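# Note (added): these descriptors only change the library-wide *defaults*, so
# set_default_quantizers(args) has to run before the model is instantiated;
# any quant_nn.QuantLinear built afterwards picks up the input/weight
# descriptors configured above.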
def configure_model(model , args , calib=False , eval=False ):
    """simple docstring"""
    logger.info('''Configuring Model for Quantization''' )
    logger.info(f'''using quantization package {pytorch_quantization.__file__}''' )
    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model , ['''embeddings'''] , which='''weight''' , _disabled=True )
        if args.quant_disable:
            set_quantizer_by_name(model , [''''''] , _disabled=True )
        if args.quant_disable_keyword:
            set_quantizer_by_name(model , args.quant_disable_keyword , _disabled=True )
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model , [R'''layer.\d+.''' + args.quant_disable_layer_module] , _disabled=True )
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model , [R'''layer.\d+.''' + args.quant_enable_layer_module] , _disabled=False )
        if args.recalibrate_weights:
            recalibrate_weights(model )
        if args.fuse_qkv:
            fuse_qkv(model , args )
        if args.clip_gelu:
            clip_gelu(model , args.clip_gelu )
    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model )
def enable_calibration(model ):
"""simple docstring"""
logger.info('''Enabling Calibration''' )
for name, module in model.named_modules():
if name.endswith('''_quantizer''' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(f'''{name:80}: {module}''' )
def finish_calibration(model , args ):
"""simple docstring"""
logger.info('''Loading calibrated amax''' )
for name, module in model.named_modules():
if name.endswith('''_quantizer''' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('''percentile''' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
    print_quant_summary(model )
def fuse_qkv(model , args ):
    """simple docstring"""
    def fusea(qq , qk , qv ):
        for mod in [qq, qk, qv]:
            if not hasattr(mod , '''_amax''' ):
                print(''' WARNING: NO AMAX BUFFER''' )
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q , k , v )
        qq._amax.fill_(amax )
        qk._amax.fill_(amax )
        qv._amax.fill_(amax )
        logger.info(f''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' )
for name, mod in model.named_modules():
if name.endswith('''.attention.self''' ):
logger.info(f'''FUSE_QKV: {name:{name_width}}''' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
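# Note (added): sharing one amax (hence one scale factor) across the Q/K/V
# quantizers is what allows the three projections to be fused into a single
# INT8 GEMM at deployment time; the trade-off is that the two smaller ranges
# are quantized slightly more coarsely.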
def clip_gelu(model , maxval ):
    """simple docstring"""
    for name, mod in model.named_modules():
        if name.endswith('''.output.dense''' ) and not name.endswith('''attention.output.dense''' ):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval )
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' )
def expand_amax(model ):
    """simple docstring"""
    for name, mod in model.named_modules():
        if hasattr(mod , '''_weight_quantizer''' ) and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k , dtype=amax.dtype , device=amax.device ) * amax
            print(f'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' )
def recalibrate_weights(model ):
    """simple docstring"""
    for name, mod in model.named_modules():
        if hasattr(mod , '''_weight_quantizer''' ):
            if not hasattr(mod._weight_quantizer , '''_amax''' ):
                print(f'''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''' )
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
            reduce_axis = set(range(len(mod.weight.size() ) ) ) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight , axis=reduce_axis , keepdims=True ).detach()
            logger.info(f'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' )
            mod._weight_quantizer._amax = amax
def print_model_summary(model , name_width=25 , line_width=180 , ignore=None ):
    """simple docstring"""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore , list ):
        ignore = [ignore]
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod , '''weight''' ):
            continue
        name_width = max(name_width , len(name ) )
    for name, mod in model.named_modules():
        input_q = getattr(mod , '''_input_quantizer''' , None )
        weight_q = getattr(mod , '''_weight_quantizer''' , None )
        if not hasattr(mod , '''weight''' ):
            continue
        if type(mod ) in ignore:
            continue
        if [True for s in ignore if type(s ) is str and s in name]:
            continue
        act_str = f'''Act:{input_q.extra_repr()}'''
        wgt_str = f'''Wgt:{weight_q.extra_repr()}'''
        s = f'''{name:{name_width}} {act_str} {wgt_str}'''
        if len(s ) <= line_width:
            logger.info(s )
        else:
            logger.info(f'''{name:{name_width}} {act_str}''' )
            logger.info(f'''{" ":{name_width}} {wgt_str}''' )
def print_quant_summary(model ):
    """simple docstring"""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod , pytorch_quantization.nn.TensorQuantizer ):
print(f'''{name:80} {mod}''' )
count += 1
print(f'''{count} TensorQuantizers found in model''' )
def set_quantizer(name , mod , quantizer , k , v ):
    """simple docstring"""
    quantizer_mod = getattr(mod , quantizer , None )
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod , k )
        setattr(quantizer_mod , k , v )
    else:
        logger.warning(f'''{name} has no {quantizer}''' )
def set_quantizers(name , mod , which='''both''' , **kwargs ):
    """simple docstring"""
    s = f'''Warning: changing {which} quantizers of {name:{qname_width}}'''
    for k, v in kwargs.items():
        s += f''' {k}={v}'''
        if which in ["input", "both"]:
            set_quantizer(name , mod , '''_input_quantizer''' , k , v )
        if which in ["weight", "both"]:
            set_quantizer(name , mod , '''_weight_quantizer''' , k , v )
    logger.info(s )
def set_quantizer_by_name(model , names , **kwargs ):
    """simple docstring"""
    for name, mod in model.named_modules():
        if hasattr(mod , '''_input_quantizer''' ) or hasattr(mod , '''_weight_quantizer''' ):
            for n in names:
                if re.search(n , name ):
                    set_quantizers(name , mod , **kwargs )
        elif name.endswith('''_quantizer''' ):
            for n in names:
                if re.search(n , name ):
                    s = f'''Warning: changing {name:{name_width}}'''
                    for k, v in kwargs.items():
                        s += f''' {k}={v}'''
                        setattr(mod , k , v )
                    logger.info(s )
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase (metaclass=DummyObject ):
    _backends = ['''flax''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''flax'''] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['''flax'''] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['''flax'''] )
class lowerCamelCase (metaclass=DummyObject ):
    _backends = ['''flax''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''flax'''] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['''flax'''] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['''flax'''] )
class lowerCamelCase (metaclass=DummyObject ):
    _backends = ['''flax''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''flax'''] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['''flax'''] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['''flax'''] )
class lowerCamelCase (metaclass=DummyObject ):
    _backends = ['''flax''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''flax'''] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['''flax'''] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['''flax'''] )
class lowerCamelCase (metaclass=DummyObject ):
    _backends = ['''flax''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''flax'''] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['''flax'''] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['''flax'''] )
class lowerCamelCase (metaclass=DummyObject ):
    _backends = ['''flax''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''flax'''] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['''flax'''] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['''flax'''] )
class lowerCamelCase (metaclass=DummyObject ):
    _backends = ['''flax''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''flax'''] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['''flax'''] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['''flax'''] )
class lowerCamelCase (metaclass=DummyObject ):
    _backends = ['''flax''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''flax'''] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['''flax'''] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['''flax'''] )
class lowerCamelCase (metaclass=DummyObject ):
    _backends = ['''flax''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''flax'''] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['''flax'''] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['''flax'''] )
class lowerCamelCase (metaclass=DummyObject ):
    _backends = ['''flax''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''flax'''] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['''flax'''] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['''flax'''] )
class lowerCamelCase (metaclass=DummyObject ):
    _backends = ['''flax''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''flax'''] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['''flax'''] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['''flax'''] )
class lowerCamelCase (metaclass=DummyObject ):
    _backends = ['''flax''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''flax'''] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['''flax'''] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['''flax'''] )
class lowerCamelCase (metaclass=DummyObject ):
    _backends = ['''flax''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''flax'''] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['''flax'''] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['''flax'''] )
'''simple docstring'''
from __future__ import annotations
def slowsort(sequence , start = None , end = None ):
    """simple docstring"""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence ) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence , start , mid )
    slowsort(sequence , mid + 1 , end )
    if sequence[end] < sequence[mid]:
        sequence[end] , sequence[mid] = sequence[mid] , sequence[end]
    slowsort(sequence , start , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
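    # Quick demonstration (added, illustrative): slowsort sorts in place.
    example = [7, 3, 9, 1]
    slowsort(example )
    print(example ) # -> [1, 3, 7, 9]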
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3
def primitive_root(p_val ):
    """simple docstring"""
    print('''Generating primitive root of p''' )
    while True:
        g = random.randrange(3 , p_val )
        if pow(g , 2 , p_val ) == 1:
            continue
        if pow(g , p_val , p_val ) == 1:
            continue
        return g
def generate_key(key_size ):
    """simple docstring"""
    print('''Generating prime p...''' )
    p = rabin_miller.generate_large_prime(key_size ) # select large prime number.
    e_1 = primitive_root(p ) # one primitive root on modulo p.
    d = random.randrange(3 , p ) # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1 , d , p ) , p )
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
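# Note (added): following the usual ElGamal construction, the public key is
# (key_size, g, g^d mod p, p) and the private key is (key_size, d); the
# matching encryption/decryption routines are expected to live elsewhere.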
def make_key_files(name , key_size ):
    """simple docstring"""
    if os.path.exists(f'''{name}_pubkey.txt''' ) or os.path.exists(f'''{name}_privkey.txt''' ):
        print('''\nWARNING:''' )
        print(
            f'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
            '''Use a different name or delete these files and re-run this program.''' )
        sys.exit()
    public_key , private_key = generate_key(key_size )
    print(f'''\nWriting public key to file {name}_pubkey.txt...''' )
    with open(f'''{name}_pubkey.txt''' , '''w''' ) as fo:
        fo.write(f'''{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}''' )
    print(f'''Writing private key to file {name}_privkey.txt...''' )
    with open(f'''{name}_privkey.txt''' , '''w''' ) as fo:
        fo.write(f'''{private_key[0]},{private_key[1]}''' )
def main():
"""simple docstring"""
print('''Making key files...''' )
make_key_files('''elgamal''' , 2_048 )
print('''Key files generation successful''' )
if __name__ == "__main__":
main()
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase (unittest.TestCase ):
@slow
    def test_small_integration_test( self ):
        """simple docstring"""
        model = FlaxMT5ForConditionalGeneration.from_pretrained('''google/mt5-small''' )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        input_ids = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
        labels = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
        decoder_input_ids = shift_tokens_right(labels , model.config.pad_token_id , model.config.decoder_start_token_id )
        logits = model(input_ids , decoder_input_ids=decoder_input_ids ).logits
        loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1] ) ).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9_127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
def solution():
    """simple docstring"""
    total = 0
    for i in range(1 , 1_001 ):
        total += i**i
    return str(total )[-10:]
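# Note (added): the same last ten digits can be obtained without building the
# full big integer, by summing modulo 10**10:
# sum(pow(i, i, 10**10) for i in range(1, 1_001)) % 10**10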
if __name__ == "__main__":
print(solution())
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCamelCase (unittest.TestCase ):
    def test_accelerated_optimizer_pickling( self ):
        """simple docstring"""
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class RobertaConfig (PretrainedConfig ):
    model_type = """roberta"""
    def __init__( self , vocab_size=50_265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig (OnnxConfig ):
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
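# Minimal usage sketch (added, illustrative):
# config = RobertaConfig(num_hidden_layers=6)
# onnx_config = RobertaOnnxConfig(config)
# print(onnx_config.inputs) # OrderedDict mapping input names to dynamic axes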
'''simple docstring'''
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]
def create_vector(end_pointa , end_pointb ):
    """simple docstring"""
    x = end_pointb[0] - end_pointa[0]
    y = end_pointb[1] - end_pointa[1]
    z = end_pointb[2] - end_pointa[2]
    return (x, y, z)
def get_ad_vectors_cross(ab , ac ):
    """simple docstring"""
    x = ab[1] * ac[2] - ab[2] * ac[1] # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
    z = ab[0] * ac[1] - ab[1] * ac[0] # *k
    return (x, y, z)
def is_zero_vector(vector , accuracy ):
    """simple docstring"""
    return tuple(round(x , accuracy ) for x in vector ) == (0, 0, 0)
def are_collinear(a , b , c , accuracy = 10 ):
    """simple docstring"""
    ab = create_vector(a , b )
    ac = create_vector(a , c )
    return is_zero_vector(get_ad_vectors_cross(ab , ac ) , accuracy )
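# Illustrative check (added): three points on the line x = y = z are collinear,
# e.g. are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2)) returns True.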
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = 'Hello world! cécé herlolip'
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path , pytorch_dump_folder_path , classification_head ):
"""simple docstring"""
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path )
    roberta.eval() # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
    if classification_head:
        config.num_labels = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
    print('''Our RoBERTa config:''' , config )
    model = XLMRobertaXLForSequenceClassification(config ) if classification_head else XLMRobertaXLForMaskedLM(config )
model.eval()
# Now let's copy all the weights.
# Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
        layer : BertLayer = model.roberta.encoder.layer[i]
        roberta_layer : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
        attention : RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias
        # self attention
        self_attn : BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        )
        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias
        # self-attention output
        self_output : BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias
        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias
# intermediate
        intermediate : BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias
        # output
        bert_output : BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
# end of layer
if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads['''mnli'''].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads['''mnli'''].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads['''mnli'''].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads['''mnli'''].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids : torch.Tensor = roberta.encode(SAMPLE_TEXT ).unsqueeze(0 ) # batch of size 1
    our_output = model(input_ids )[0]
    if classification_head:
        their_output = roberta.model.classification_heads['''mnli'''](roberta.extract_features(input_ids ) )
    else:
        their_output = roberta.model(input_ids )[0]
    print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1E-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
    pathlib.Path(pytorch_dump_folder_path ).mkdir(parents=True , exist_ok=True )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
logger = logging.getLogger(__name__)
def save_model(model , dirpath ):
    """simple docstring"""
    if os.path.exists(dirpath ):
        if os.path.exists(os.path.join(dirpath , '''config.json''' ) ) and os.path.isfile(
            os.path.join(dirpath , '''config.json''' ) ):
            os.remove(os.path.join(dirpath , '''config.json''' ) )
        if os.path.exists(os.path.join(dirpath , '''pytorch_model.bin''' ) ) and os.path.isfile(
            os.path.join(dirpath , '''pytorch_model.bin''' ) ):
            os.remove(os.path.join(dirpath , '''pytorch_model.bin''' ) )
    else:
        os.makedirs(dirpath )
    model.save_pretrained(dirpath )
def entropy(p , unlogit=False ):
    """simple docstring"""
    exponent = 2
    if unlogit:
        p = torch.pow(p , exponent )
    plogp = p * torch.log(p )
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1 )
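# Note (added): for a uniform distribution over k outcomes this returns ln(k),
# e.g. entropy(torch.full((4,), 0.25)) is approximately 1.386.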
def print_ad_tensor(tensor ):
    """simple docstring"""
    logger.info('''lv, h >\t''' + '''\t'''.join(f'''{x + 1}''' for x in range(len(tensor ) ) ) )
    for row in range(len(tensor ) ):
if tensor.dtype != torch.long:
logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) )
else:
logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:d}''' for x in tensor[row].cpu().data ) )
def compute_heads_importance(args , model , eval_dataloader , compute_entropy=True , compute_importance=True , head_mask=None , actually_pruned=False ):
    """simple docstring"""
    n_layers , n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers , n_heads ).to(args.device )
    attn_entropy = torch.zeros(n_layers , n_heads ).to(args.device )
    if head_mask is None:
        head_mask = torch.ones(n_layers , n_heads ).to(args.device )
    head_mask.requires_grad_(requires_grad=True )
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None
    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ):
        inputs = tuple(t.to(args.device ) for t in inputs )
        (input_ids , ) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids , labels=input_ids , head_mask=head_mask )
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss , _ , all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        ) # Loss and logits are the first, attention the last
        loss.backward() # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions ):
                masked_entropy = entropy(attn.detach() , True )
                attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids ).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance , exponent ).sum(-1 ) , 1 / exponent )
        head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info('''Attention entropies''' )
        print_ad_tensor(attn_entropy )
    if compute_importance:
        logger.info('''Head importance scores''' )
        print_ad_tensor(head_importance )
    logger.info('''Head ranked by importance scores''' )
    head_ranks = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
    head_ranks[head_importance.view(-1 ).sort(descending=True )[1]] = torch.arange(
        head_importance.numel() , device=args.device )
    head_ranks = head_ranks.view_as(head_importance )
    print_ad_tensor(head_ranks )
return attn_entropy, head_importance, total_loss
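# Note (added): head importance here is the accumulated absolute gradient of
# the loss with respect to each head-mask entry, i.e. a first-order estimate of
# how much the loss changes when that attention head is removed
# (Michel et al., 2019, "Are Sixteen Heads Really Better than One?").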
def mask_heads(args , model , eval_dataloader ):
    """simple docstring"""
    _ , head_importance , loss = compute_heads_importance(args , model , eval_dataloader , compute_entropy=False )
    original_score = 1 / loss # instead of downstream score use the LM loss
    logger.info('''Pruning: original score: %f, threshold: %f''' , original_score , original_score * args.masking_threshold )
    new_head_mask = torch.ones_like(head_importance )
    num_to_mask = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach() # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float('''Inf''' )
        current_heads_to_mask = head_importance.view(-1 ).sort()[1]
        if len(current_heads_to_mask ) <= num_to_mask:
            print('''BREAK BY num_to_mask''' )
            break
        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) )
        new_head_mask = new_head_mask.view(-1 )
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask )
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask )
        # Compute metric and head importance again
        _ , head_importance , loss = compute_heads_importance(
            args , model , eval_dataloader , compute_entropy=False , head_mask=new_head_mask )
        current_score = 1 / loss
        logger.info(
            '''Masking: current score: %f, remaining heads %d (%.1f percents)''' , current_score , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
    logger.info('''Final head mask''' )
    print_ad_tensor(head_mask )
    np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() )
    return head_mask
def prune_heads(args , model , eval_dataloader , head_mask ):
    """simple docstring"""
    before_time = datetime.now()
    _ , _ , loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=head_mask )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters() )
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask ) )
    }
    for k, v in heads_to_prune.items():
        if isinstance(v , int ):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune )
    pruned_num_params = sum(p.numel() for p in model.parameters() )
    before_time = datetime.now()
    _ , _ , loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=None , actually_pruned=True , )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time
    logger.info(
        '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , original_num_params , pruned_num_params , pruned_num_params / original_num_params * 100 , )
    logger.info('''Pruning: score with masking: %f score with pruning: %f''' , score_masking , score_pruning )
    logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100 )
    save_model(model , args.output_dir )
def main():
"""simple docstring"""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--data_dir''' , default=None , type=str , required=True , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
    parser.add_argument(
        '''--model_name_or_path''' , default=None , type=str , required=True , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
    parser.add_argument(
        '''--output_dir''' , default=None , type=str , required=True , help='''The output directory where the model predictions and checkpoints will be written.''' , )
    # Other parameters
    parser.add_argument(
        '''--config_name''' , default='''''' , type=str , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
    parser.add_argument(
        '''--tokenizer_name''' , default='''''' , type=str , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
    parser.add_argument(
        '''--cache_dir''' , default=None , type=str , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
    parser.add_argument(
        '''--data_subset''' , type=int , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' )
    parser.add_argument(
        '''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' )
    parser.add_argument(
        '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
    parser.add_argument(
        '''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' )
    parser.add_argument(
        '''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
    parser.add_argument(
        '''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' )
    parser.add_argument(
        '''--masking_threshold''' , default=0.9 , type=float , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , )
    parser.add_argument(
        '''--masking_amount''' , default=0.1 , type=float , help='''Amount to heads to masking at each masking step.''' )
    parser.add_argument('''--metric_name''' , default='''acc''' , type=str , help='''Metric to use for head masking.''' )
    parser.add_argument(
        '''--max_seq_length''' , default=128 , type=int , help=(
            '''The maximum total input sequence length after WordPiece tokenization. \n'''
            '''Sequences longer than this will be truncated, sequences shorter padded.'''
        ) , )
    parser.add_argument('''--batch_size''' , default=1 , type=int , help='''Batch size.''' )
    parser.add_argument('''--seed''' , type=int , default=42 )
    parser.add_argument('''--local_rank''' , type=int , default=-1 , help='''local_rank for distributed training on gpus''' )
    parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' )
    parser.add_argument('''--server_ip''' , type=str , default='''''' , help='''Can be used for distant debugging.''' )
    parser.add_argument('''--server_port''' , type=str , default='''''' , help='''Can be used for distant debugging.''' )
    args = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCAmelCase_ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
args.device = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
args.device = torch.device('''cuda''' , args.local_rank )
args.n_gpu = 1
torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
model = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
model = nn.parallel.DistributedDataParallel(
model , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=True )
elif args.n_gpu > 1:
model = nn.DataParallel(model )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=True )
torch.save(args , os.path.join(args.output_dir , '''run_args.bin''' ) )
logger.info('''Training/evaluation parameters %s''' , args )
# Prepare dataset
numpy_data = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.int64 ),
] )
train_data = (torch.from_numpy(numpy_data ),)
train_dataset = TensorDataset(*train_data )
train_sampler = RandomSampler(train_dataset )
eval_dataloader = DataLoader(train_dataset , sampler=train_sampler , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(args , model , eval_dataloader )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
head_mask = mask_heads(args , model , eval_dataloader )
prune_heads(args , model , eval_dataloader , head_mask )
if __name__ == "__main__":
main()
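# Illustrative sketch of the head-mask -> heads_to_prune translation used in the
# pruning step above; the toy tensor and helper name are assumptions made purely
# for demonstration (a 0 in the mask marks a head to prune).
def _example_heads_to_prune():
    head_mask = torch.tensor([[1, 0, 1], [0, 1, 1]])  # 2 layers x 3 heads
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist()
        for layer in range(len(head_mask))
    }
    # A single pruned head squeezes down to a bare int ({0: 1, 1: 0}), which is
    # why the script wraps non-list values into one-element lists before pruning.
    return heads_to_prune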
| 47
| 0
|
'''simple docstring'''
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool (PipelineTool ):
default_checkpoint = """facebook/bart-large-mnli"""
description = (
"""This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which """
"""should be the text to classify, and `labels`, which should be the list of labels to use for classification. """
"""It returns the most likely label in the list of provided `labels` for the input text."""
)
name = """text_classifier"""
pre_processor_class = AutoTokenizer
model_class = AutoModelForSequenceClassification
inputs = ["""text""", ["""text"""]]
outputs = ["""text"""]
def setup ( self ) -> None:
"""simple docstring"""
super().setup()
config = self.model.config
self.entailment_id = -1
for idx, label in config.id2label.items():
if label.lower().startswith('''entail''' ):
self.entailment_id = int(idx )
if self.entailment_id == -1:
raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' )
def encode ( self , text , labels ) -> Any:
"""simple docstring"""
self._labels = labels
return self.pre_processor(
[text] * len(labels ) , [F'''This example is {label}''' for label in labels] , return_tensors='''pt''' , padding='''max_length''' , )
def decode ( self , outputs ) -> str:
"""simple docstring"""
logits = outputs.logits
label_id = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
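# Minimal usage sketch for the tool above, assuming the generic PipelineTool call
# path (encode -> forward -> decode) and a locally cached checkpoint; the sample
# text and labels are illustrative only.
#
#   classifier = TextClassificationTool()
#   classifier("This movie was fantastic!", ["positive", "negative"])  # -> "positive"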
| 711
|
'''simple docstring'''
def fibonacci ( n ):
"""simple docstring"""
if n == 1 or not isinstance(n , int ):
return 0
elif n == 2:
return 1
else:
sequence = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def fibonacci_digits_index ( n ):
"""simple docstring"""
digits = 0
index = 2
while digits < n:
index += 1
digits = len(str(fibonacci(index ) ) )
return index
def solution ( n = 1_000 ):
"""simple docstring"""
return fibonacci_digits_index(n )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
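# Sanity notes for the helpers above: with this indexing the first Fibonacci
# number with 3 digits is fibonacci(12) == 144, so fibonacci_digits_index(3) == 12,
# and solution() reproduces the well-known Project Euler 25 answer of 4782 for
# the default 1_000-digit threshold.
#
#   assert fibonacci(12) == 144
#   assert fibonacci_digits_index(3) == 12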
| 47
| 0
|
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters ( model ):
"""simple docstring"""
model_parameters = filter(lambda p : p.requires_grad , model.parameters() )
params = sum([np.prod(p.size() ) for p in model_parameters] )
return params
UpperCAmelCase : Dict = logging.getLogger(__name__)
def get_checkpoint_callback ( output_dir , metric ):
"""simple docstring"""
if metric == "rouge2":
exp = '''{val_avg_rouge2:.4f}-{step_count}'''
elif metric == "bleu":
exp = '''{val_avg_bleu:.4f}-{step_count}'''
elif metric == "em":
exp = '''{val_avg_em:.4f}-{step_count}'''
elif metric == "loss":
exp = '''{val_avg_loss:.4f}-{step_count}'''
else:
raise NotImplementedError(
f'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
''' function.''' )
checkpoint_callback = ModelCheckpoint(
dirpath=output_dir , filename=exp , monitor=f'''val_{metric}''' , mode='''max''' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def get_early_stopping_callback ( metric , patience ):
"""simple docstring"""
return EarlyStopping(
monitor=f'''val_{metric}''' , mode='''min''' if '''loss''' in metric else '''max''' , patience=patience , verbose=True , )
class Seq2SeqLoggingCallback (pl.Callback ):
def on_batch_end ( self , trainer , pl_module ) -> None:
"""simple docstring"""
lrs = {F'''lr_group_{i}''': param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(lrs )
@rank_zero_only
def _write_logs ( self , trainer , pl_module , type_path , save_generations=True ) -> None:
"""simple docstring"""
logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
metrics = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
# Log results
od = Path(pl_module.hparams.output_dir )
if type_path == "test":
results_file = od / '''test_results.txt'''
generations_file = od / '''test_generations.txt'''
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
results_file = od / F'''{type_path}_results/{trainer.global_step:05d}.txt'''
generations_file = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=True )
generations_file.parent.mkdir(exist_ok=True )
with open(results_file , '''a+''' ) as writer:
for key in sorted(metrics ):
if key in ["log", "progress_bar", "preds"]:
continue
val = metrics[key]
if isinstance(val , torch.Tensor ):
val = val.item()
msg = F'''{key}: {val:.6f}\n'''
writer.write(msg )
if not save_generations:
return
if "preds" in metrics:
content = '''\n'''.join(metrics['''preds'''] )
generations_file.open('''w+''' ).write(content )
@rank_zero_only
def on_train_start ( self , trainer , pl_module ) -> None:
"""simple docstring"""
try:
npars = pl_module.model.model.num_parameters()
except AttributeError:
npars = pl_module.model.num_parameters()
n_trainable_pars = count_trainable_parameters(pl_module )
# mp stands for million parameters
trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1E6, '''grad_mp''': n_trainable_pars / 1E6} )
@rank_zero_only
def on_test_end ( self , trainer , pl_module ) -> None:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(trainer , pl_module , '''test''' )
@rank_zero_only
def on_validation_end ( self , trainer , pl_module ) -> None:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
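# Wiring sketch: the two factories and the logging callback above are typically
# handed to a pytorch_lightning Trainer together; the output_dir value and the
# minimal Trainer arguments below are illustrative assumptions.
#
#   checkpoint_cb = get_checkpoint_callback("outputs", "rouge2")
#   early_stop_cb = get_early_stopping_callback("rouge2", 3)
#   trainer = pl.Trainer(callbacks=[checkpoint_cb, early_stop_cb, Seq2SeqLoggingCallback()])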
| 712
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
UpperCAmelCase : Any = TypeVar('T')
UpperCAmelCase : str = TypeVar('U')
class DoubleLinkedListNode (Generic[T, U] ):
def __init__( self , key , val ) -> None:
"""simple docstring"""
self.key = key
self.val = val
self.next: DoubleLinkedListNode[T, U] | None = None
self.prev: DoubleLinkedListNode[T, U] | None = None
def __repr__( self ) -> str:
"""simple docstring"""
return (
F'''Node: key: {self.key}, val: {self.val}, '''
F'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
)
class DoubleLinkedList (Generic[T, U] ):
def __init__( self ) -> None:
"""simple docstring"""
self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None , None )
self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None , None )
self.head.next , self.rear.prev = self.rear, self.head
def __repr__( self ) -> str:
"""simple docstring"""
rep = ['''DoubleLinkedList''']
node = self.head
while node.next is not None:
rep.append(str(node ) )
node = node.next
rep.append(str(self.rear ) )
return ",\n ".join(rep )
def add ( self , node ) -> None:
"""simple docstring"""
previous = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
previous.next = node
node.prev = previous
self.rear.prev = node
node.next = self.rear
def remove ( self , node ) -> DoubleLinkedListNode[T, U] | None:
"""simple docstring"""
if node.prev is None or node.next is None:
return None
node.prev.next = node.next
node.next.prev = node.prev
node.prev = None
node.next = None
return node
class LRUCache (Generic[T, U] ):
decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__( self , capacity ) -> None:
"""simple docstring"""
self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
self.capacity = capacity
self.num_keys = 0
self.hits = 0
self.miss = 0
self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self ) -> str:
"""simple docstring"""
return (
F'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
F'''capacity={self.capacity}, current size={self.num_keys})'''
)
def __contains__( self , key ) -> bool:
"""simple docstring"""
return key in self.cache
def get ( self , key ) -> U | None:
"""simple docstring"""
if key in self.cache:
self.hits += 1
value_node: DoubleLinkedListNode[T, U] = self.cache[key]
node = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(node )
return node.val
self.miss += 1
return None
def put ( self , key , value ) -> None:
"""simple docstring"""
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
first_node = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(first_node ) is not None
) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
self.cache[key] = DoubleLinkedListNode(key , value )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
node = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
node.val = value
self.list.add(node )
@classmethod
def decorator ( cls , size = 128 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
"""simple docstring"""
def cache_decorator_inner(func ) -> Callable[..., U]:
def cache_decorator_wrapper(*args ) -> U:
if func not in cls.decorator_function_to_instance_map:
cls.decorator_function_to_instance_map[func] = LRUCache(size )
result = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
result = func(*args )
cls.decorator_function_to_instance_map[func].put(args[0] , result )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(cache_decorator_wrapper , '''cache_info''' , cache_info ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
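# Usage sketch for the class-level decorator above; fib is a throwaway example.
#
#   @LRUCache.decorator(100)
#   def fib(num):
#       if num in (1, 2):
#           return 1
#       return fib(num - 1) + fib(num - 2)
#
#   fib(100)          # memoized, so the naive recursion completes instantly
#   fib.cache_info()  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)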
| 47
| 0
|
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def capitalize ( sentence ):
"""simple docstring"""
if not sentence:
return ""
lower_to_upper = dict(zip(ascii_lowercase , ascii_uppercase ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
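# Quick examples for capitalize(): only the first character is mapped through the
# lowercase -> uppercase table; everything else passes through unchanged.
#
#   capitalize("hello world")  # -> "Hello world"
#   capitalize("123 hey")      # -> "123 hey"
#   capitalize("")             # -> ""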
| 713
|
'''simple docstring'''
import os
import numpy
import onnx
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : List[Any] = a.name
_snake_case : List[Any] = b.name
_snake_case : Tuple = ''''''
_snake_case : Tuple = ''''''
_snake_case : Optional[Any] = a == b
_snake_case : List[Any] = name_a
_snake_case : str = name_b
return res
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(lowerCAmelCase_ , lowerCAmelCase_ )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , lowerCAmelCase_ , lowerCAmelCase_ )
_graph_replace_input_with(node_proto.attribute[1].g , lowerCAmelCase_ , lowerCAmelCase_ )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , lowerCAmelCase_ , lowerCAmelCase_ )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
for n in graph_proto.node:
_node_replace_input_with(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Optional[Any] = list(model.graph.initializer )
_snake_case : List[str] = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
_snake_case : List[Any] = inits[i].name
_snake_case : List[str] = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , lowerCAmelCase_ , lowerCAmelCase_ )
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Tuple = os.path.dirname(lowerCAmelCase_ )
_snake_case : str = os.path.basename(lowerCAmelCase_ )
_snake_case : Tuple = onnx.load(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) )
_snake_case : Union[str, Any] = list(model.graph.initializer )
_snake_case : Union[str, Any] = set()
_snake_case : Any = {}
_snake_case : str = []
_snake_case : Union[str, Any] = 0
for i in range(len(lowerCAmelCase_ ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(lowerCAmelCase_ ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(lowerCAmelCase_ )
dup_set.add(lowerCAmelCase_ )
_snake_case : List[Any] = inits[j].data_type
_snake_case : Dict = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('''unexpected data type: ''' , lowerCAmelCase_ )
total_reduced_size += mem_size
name_i = inits[i].name
name_j = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(lowerCAmelCase_ )
else:
dup_map[name_i] = [name_j]
ind_to_replace.append((j, i) )
print('''total reduced size: ''' , total_reduced_size / 1_024 / 1_024 / 1_024 , '''GB''' )
_snake_case : List[str] = sorted(lowerCAmelCase_ )
_remove_dup_initializers_from_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case : List[str] = '''optimized_''' + model_file_name
_snake_case : List[Any] = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
onnx.save(lowerCAmelCase_ , lowerCAmelCase_ )
return new_model
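# Usage sketch for the entry point above (name assumed from the
# _remove_dup_initializers_from_model helper): it loads the ONNX file, folds
# duplicated initializer tensors into a single copy, rewires consumers, and
# saves an optimized_<name> sibling file; the path below is illustrative.
#
#   remove_dup_initializers("exported/model.onnx")  # -> "optimized_model.onnx"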
| 47
| 0
|
'''simple docstring'''
def gnome_sort ( lst ):
"""simple docstring"""
if len(lst ) <= 1:
return lst
i = 1
while i < len(lst ):
if lst[i - 1] <= lst[i]:
i += 1
else:
lst[i - 1] , lst[i] = lst[i], lst[i - 1]
i -= 1
if i == 0:
i = 1
return lst
if __name__ == "__main__":
user_input = input('Enter numbers separated by a comma:\n').strip()
unsorted = [int(item) for item in user_input.split(',')]
print(gnome_sort(unsorted))
| 714
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_pegasus_x'] = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 47
| 0
|
'''simple docstring'''
def move_tower ( height , from_pole , to_pole , with_pole ):
"""simple docstring"""
if height >= 1:
move_tower(height - 1 , from_pole , with_pole , to_pole )
move_disk(from_pole , to_pole )
move_tower(height - 1 , with_pole , to_pole , from_pole )
def move_disk ( fp , tp ):
"""simple docstring"""
print('''moving disk from''' , fp , '''to''' , tp )
def main ( ):
"""simple docstring"""
height = int(input('''Height of hanoi: ''' ).strip() )
move_tower(height , '''A''' , '''B''' , '''C''' )
if __name__ == "__main__":
main()
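# Move-count check for the recursion above: move_tower recurses twice on
# height - 1 plus one move_disk call, so T(height) = 2 * T(height - 1) + 1 with
# T(0) = 0, i.e. T(height) = 2**height - 1.
#
#   move_tower(3, "A", "B", "C")  # prints exactly 7 moves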
| 715
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase : Dict = logging.get_logger(__name__)
class GLPNImageProcessor (BaseImageProcessor ):
model_input_names = ["""pixel_values"""]
def __init__( self , do_resize = True , size_divisor = 32 , resample=PILImageResampling.BILINEAR , do_rescale = True , **kwargs , ) -> None:
"""simple docstring"""
self.do_resize = do_resize
self.do_rescale = do_rescale
self.size_divisor = size_divisor
self.resample = resample
super().__init__(**kwargs )
def resize ( self , image , size_divisor , resample , data_format = None , **kwargs ) -> np.ndarray:
"""simple docstring"""
height , width = get_image_size(image )
# Rounds the height and width down to the closest multiple of size_divisor
new_h = height // size_divisor * size_divisor
new_w = width // size_divisor * size_divisor
image = resize(image , (new_h, new_w) , resample=resample , data_format=data_format , **kwargs )
return image
def rescale ( self , image , scale , data_format = None , **kwargs ) -> np.ndarray:
"""simple docstring"""
return rescale(image=image , scale=scale , data_format=data_format , **kwargs )
def preprocess ( self , images , do_resize = None , do_rescale = None , size_divisor=None , resample = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
"""simple docstring"""
do_resize = do_resize if do_resize is not None else self.do_resize
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
size_divisor = size_divisor if size_divisor is not None else self.size_divisor
resample = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
images = make_list_of_images(images )
if not valid_images(images ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
images = [to_numpy_array(img ) for img in images]
if do_resize:
images = [self.resize(image , size_divisor=size_divisor , resample=resample ) for image in images]
if do_rescale:
images = [self.rescale(image , scale=1 / 255 ) for image in images]
images = [to_channel_dimension_format(image , data_format ) for image in images]
data = {'''pixel_values''': images}
return BatchFeature(data=data , tensor_type=return_tensors )
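# Shape sketch for the preprocess() flow above; the toy image size is an
# assumption, and both dimensions get rounded down to multiples of size_divisor.
#
#   import numpy as np, PIL.Image
#   processor = GLPNImageProcessor()  # the class defined above
#   image = PIL.Image.fromarray(np.zeros((37, 50, 3), dtype=np.uint8))
#   batch = processor(image, return_tensors='''np''' )
#   batch['''pixel_values'''].shape  # (1, 3, 32, 32) with the default size_divisor=32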
| 47
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester :
config_cls = LEDConfig
config_updates = {}
hidden_act = """gelu"""
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , attention_window=4 , ) -> None:
"""simple docstring"""
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.attention_window = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
self.key_length = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
self.encoder_seq_length = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def prepare_config_and_inputs_for_common ( self ):
"""simple docstring"""
input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
config = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
inputs_dict = prepare_led_inputs_dict(config , input_ids , decoder_input_ids )
global_attention_mask = tf.concat(
[tf.zeros_like(input_ids )[:, :-1], tf.ones_like(input_ids )[:, -1:]] , axis=-1 , )
inputs_dict['''global_attention_mask'''] = global_attention_mask
return config, inputs_dict
def check_decoder_model_past_large_inputs ( self , config , inputs_dict ):
"""simple docstring"""
model = TFLEDModel(config=config ).get_decoder()
input_ids = inputs_dict['''input_ids''']
input_ids = input_ids[:1, :]
attention_mask = inputs_dict['''attention_mask'''][:1, :]
self.batch_size = 1
# first forward pass
outputs = model(input_ids , attention_mask=attention_mask , use_cache=True )
output , past_key_values = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
output_from_past_slice = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(output_from_no_past_slice , output_from_past_slice , rtol=1E-3 )
def prepare_led_inputs_dict ( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , ):
"""simple docstring"""
if attention_mask is None:
attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
decoder_attention_mask = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class TFLEDModelTest (TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
pipeline_model_mapping = (
{
"""conversational""": TFLEDForConditionalGeneration,
"""feature-extraction""": TFLEDModel,
"""summarization""": TFLEDForConditionalGeneration,
"""text2text-generation""": TFLEDForConditionalGeneration,
"""translation""": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
is_encoder_decoder = True
test_pruning = False
test_head_masking = False
test_onnx = False
def setUp ( self ) -> None:
"""simple docstring"""
self.model_tester = TFLEDModelTester(self )
self.config_tester = ConfigTester(self , config_class=LEDConfig )
def test_config ( self ) -> None:
"""simple docstring"""
self.config_tester.run_common_tests()
def test_decoder_model_past_large_inputs ( self ) -> None:
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
def test_attention_outputs ( self ) -> None:
"""simple docstring"""
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Any = tf.zeros_like(inputs_dict['''attention_mask'''] )
_snake_case : Optional[Any] = 2
_snake_case : Any = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
_snake_case : Dict = True
_snake_case : str = self.model_tester.seq_length
_snake_case : Dict = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowercase__ ):
_snake_case : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(lowercase__ ):
_snake_case : int = [t.numpy() for t in outputs.encoder_attentions]
_snake_case : Tuple = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_snake_case : Union[str, Any] = True
_snake_case : Dict = False
_snake_case : Union[str, Any] = False
_snake_case : List[Any] = model_class(lowercase__ )
_snake_case : Optional[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
_snake_case : List[Any] = len(lowercase__ )
self.assertEqual(config.output_hidden_states , lowercase__ )
check_encoder_attentions_output(lowercase__ )
if self.is_encoder_decoder:
_snake_case : Union[str, Any] = model_class(lowercase__ )
_snake_case : List[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
self.assertEqual(config.output_hidden_states , lowercase__ )
check_decoder_attentions_output(lowercase__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_snake_case : str = True
_snake_case : Tuple = model_class(lowercase__ )
_snake_case : int = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
self.assertEqual(config.output_hidden_states , lowercase__ )
check_encoder_attentions_output(lowercase__ )
# Check attention is always last and order is fine
_snake_case : int = True
_snake_case : List[str] = True
_snake_case : Tuple = model_class(lowercase__ )
_snake_case : Optional[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase__ ) )
self.assertEqual(model.config.output_hidden_states , lowercase__ )
check_encoder_attentions_output(lowercase__ )
@unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
def UpperCAmelCase_ ( self ) -> int:
"""simple docstring"""
pass
def UpperCAmelCase_ ( self ) -> str:
"""simple docstring"""
pass
def _long_tensor ( tok_lst ):
"""simple docstring"""
return tf.constant(tok_lst , dtype=tf.int32 )
UpperCAmelCase : Dict = 1E-4
@slow
@require_tf
class TFLEDModelIntegrationTest (unittest.TestCase ):
def test_inference_no_head ( self ) -> None:
"""simple docstring"""
model = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
# change to intended input here
input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
inputs_dict = prepare_led_inputs_dict(model.config , input_ids , decoder_input_ids )
output = model(**inputs_dict )[0]
expected_shape = (1, 1_024, 768)
self.assertEqual(output.shape , expected_shape )
# change to expected output here
expected_slice = tf.convert_to_tensor(
[[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-3 )
def test_inference_with_head ( self ) -> None:
"""simple docstring"""
model = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
# change to intended input here
input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
inputs_dict = prepare_led_inputs_dict(model.config , input_ids , decoder_input_ids )
output = model(**inputs_dict )[0]
expected_shape = (1, 1_024, model.config.vocab_size)
self.assertEqual(output.shape , expected_shape )
# change to expected output here
expected_slice = tf.convert_to_tensor(
[[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-3 , rtol=1E-3 )
| 716
|
| 47
| 0
|
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
EQUATORIAL_RADIUS = 6_378_137
def lamberts_ellipsoidal_distance ( lat1 , lon1 , lat2 , lon2 ):
"""simple docstring"""
flattening = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
b_lat1 = atan((1 - flattening) * tan(radians(lat1 ) ) )
b_lat2 = atan((1 - flattening) * tan(radians(lat2 ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
sigma = haversine_distance(lat1 , lon1 , lat2 , lon2 ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
p_value = (b_lat1 + b_lat2) / 2
q_value = (b_lat1 - b_lat2) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
x_numerator = (sin(p_value ) ** 2) * (cos(q_value ) ** 2)
x_denominator = cos(sigma / 2 ) ** 2
x_value = (sigma - sin(sigma )) * (x_numerator / x_denominator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
y_numerator = (cos(p_value ) ** 2) * (sin(q_value ) ** 2)
y_denominator = sin(sigma / 2 ) ** 2
y_value = (sigma + sin(sigma )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
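# Usage sketch; the coordinates are approximate values for San Francisco and
# New York, and the printed figure is a ballpark check only.
#
#   SAN_FRANCISCO = (37.774856, -122.424227)
#   NEW_YORK = (40.713019, -74.012647)
#   lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *NEW_YORK)  # roughly 4.14e6 meters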
| 717
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase : List[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase : Any = {
'tokenizer_file': {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
},
}
UpperCAmelCase : Optional[Any] = {
'gpt-neox-20b': 2_0_4_8,
}
class GPTNeoXTokenizerFast (PreTrainedTokenizerFast ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ) -> None:
"""simple docstring"""
super().__init__(
vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
pre_tok_state['''add_prefix_space'''] = add_prefix_space
self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
self.add_prefix_space = add_prefix_space
def save_vocabulary ( self , save_directory , filename_prefix = None ) -> Tuple[str]:
"""simple docstring"""
files = self._tokenizer.model.save(save_directory , name=filename_prefix )
return tuple(files )
def _build_conversation_input_ids ( self , conversation ) -> List[int]:
"""simple docstring"""
input_ids = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
if len(input_ids ) > self.model_max_length:
input_ids = input_ids[-self.model_max_length :]
return input_ids
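# Round-trip sketch for the fast tokenizer above, assuming network access to the
# checkpoint referenced in the pretrained maps:
#
#   tok = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#   ids = tok("Hello world").input_ids
#   tok.decode(ids)  # -> "Hello world"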
| 47
| 0
|
'''simple docstring'''
def gcd ( a , b ):
"""simple docstring"""
while a != 0:
a , b = b % a, a
return b
def mod_inverse ( a , m ):
"""simple docstring"""
if gcd(a , m ) != 1:
msg = f'''mod inverse of {a!r} and {m!r} does not exist'''
raise ValueError(msg )
ua , ub , uc = 1, 0, a
va , vb , vc = 0, 1, m
while vc != 0:
q = uc // vc
va , vb , vc , ua , ub , uc = (ua - q * va), (ub - q * vb), (uc - q * vc), va, vb, vc
return ua % m
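# Worked example: gcd(3, 11) == 1 and the extended-Euclid loop above yields 4,
# since 3 * 4 = 12 ≡ 1 (mod 11); a non-invertible pair raises ValueError.
#
#   mod_inverse(3, 11)  # -> 4
#   mod_inverse(4, 8)   # raises ValueError, because gcd(4, 8) == 2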
| 718
|
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def gamma ( num ):
"""simple docstring"""
if num <= 0:
raise ValueError('''math domain error''' )
return quad(integrand , 0 , inf , args=(num,) )[0]
def integrand ( x , z ):
"""simple docstring"""
return math.pow(x , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
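# Worked checks for the integral above: Gamma(z) = ∫_0^inf x**(z - 1) * e**(-x) dx,
# so Gamma(n) == (n - 1)! for positive integers.
#
#   round(gamma(5), 6)    # -> 24.0
#   round(gamma(0.5), 4)  # -> 1.7725  (sqrt(pi))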
| 47
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Tuple = logging.get_logger(__name__)
UpperCAmelCase : List[Any] = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class FalconConfig (PretrainedConfig ):
model_type = """falcon"""
keys_to_ignore_at_inference = ["""past_key_values"""]
def __init__( self , vocab_size=65_024 , hidden_size=4_544 , num_hidden_layers=32 , num_attention_heads=71 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , hidden_dropout=0.0 , attention_dropout=0.0 , num_kv_heads=None , alibi=False , new_decoder_architecture=False , multi_query=True , parallel_attn=True , bias=False , bos_token_id=11 , eos_token_id=11 , **kwargs , ) -> None:
"""simple docstring"""
self.vocab_size = vocab_size
# Backward compatibility with n_embed kwarg
n_embed = kwargs.pop('''n_embed''' , None )
self.hidden_size = hidden_size if n_embed is None else n_embed
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.use_cache = use_cache
self.hidden_dropout = hidden_dropout
self.attention_dropout = attention_dropout
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
self.alibi = alibi
self.new_decoder_architecture = new_decoder_architecture
self.multi_query = multi_query # Ignored when new_decoder_architecture is True
self.parallel_attn = parallel_attn
self.bias = bias
super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
@property
def UpperCAmelCase_ ( self ) -> Any:
"""simple docstring"""
return self.hidden_size // self.num_attention_heads
@property
def UpperCAmelCase_ ( self ) -> Tuple:
"""simple docstring"""
return not self.alibi
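# Worked example (from the defaults above, assuming the class mirrors
# transformers' FalconConfig): hidden_size=4_544 and num_attention_heads=71
# give a head dimension of 4_544 // 71 == 64, and since alibi defaults to
# False the second property reports that rotary embeddings are in use.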
| 719
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class lowerCamelCase (unittest.TestCase ):
@slow
def UpperCAmelCase_ ( self ) -> List[Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_snake_case : Union[str, Any] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Any = TFAutoModel.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : str = AutoModel.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> List[str]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_snake_case : Optional[Any] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Dict = TFAutoModelForPreTraining.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Tuple = AutoModelForPreTraining.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Optional[int] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : List[Any] = TFAutoModelForCausalLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
_snake_case , _snake_case : Tuple = TFAutoModelForCausalLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
_snake_case , _snake_case : Optional[Any] = AutoModelForCausalLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : List[Any] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Tuple = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Any:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
_snake_case , _snake_case : List[str] = TFAutoModelForMaskedLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : int = AutoModelForMaskedLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
_snake_case , _snake_case : Optional[int] = AutoModelForMaskedLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> List[str]:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
_snake_case , _snake_case : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
_snake_case , _snake_case : Dict = AutoModelForSeqaSeqLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Dict:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_snake_case : Any = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Any = TFAutoModelForSequenceClassification.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Dict = AutoModelForSequenceClassification.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_snake_case : str = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : str = TFAutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
def UpperCAmelCase_ ( self ) -> str:
"""simple docstring"""
_snake_case : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
_snake_case : Tuple = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
def UpperCAmelCase_ ( self ) -> str:
"""simple docstring"""
_snake_case : List[str] = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
_snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
| 47
| 0
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCamelCase (a__ ):
_lowercase : UNetaDModel
_lowercase : ScoreSdeVeScheduler
def __init__( self , lowercase__ , lowercase__ ) -> Optional[int]:
"""simple docstring"""
super().__init__()
self.register_modules(unet=lowercase__ , scheduler=lowercase__ )
@torch.no_grad()
def __call__( self , lowercase__ = 1 , lowercase__ = 2_000 , lowercase__ = None , lowercase__ = "pil" , lowercase__ = True , **lowercase__ , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
_snake_case : Optional[int] = self.unet.config.sample_size
_snake_case : int = (batch_size, 3, img_size, img_size)
_snake_case : Dict = self.unet
_snake_case : List[str] = randn_tensor(lowercase__ , generator=lowercase__ ) * self.scheduler.init_noise_sigma
_snake_case : Tuple = sample.to(self.device )
self.scheduler.set_timesteps(lowercase__ )
self.scheduler.set_sigmas(lowercase__ )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
_snake_case : Dict = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
_snake_case : Dict = self.unet(lowercase__ , lowercase__ ).sample
_snake_case : Dict = self.scheduler.step_correct(lowercase__ , lowercase__ , generator=lowercase__ ).prev_sample
# prediction step
_snake_case : int = model(lowercase__ , lowercase__ ).sample
_snake_case : List[Any] = self.scheduler.step_pred(lowercase__ , lowercase__ , lowercase__ , generator=lowercase__ )
_snake_case , _snake_case : List[Any] = output.prev_sample, output.prev_sample_mean
_snake_case : List[str] = sample_mean.clamp(0 , 1 )
_snake_case : List[Any] = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_snake_case : Any = self.numpy_to_pil(lowercase__ )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=lowercase__ )
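# Hedged usage sketch (assumption: this mirrors diffusers' ScoreSdeVePipeline;
# `trained_unet` is a placeholder name):
# pipe = pipeline_class(unet=trained_unet, scheduler=ScoreSdeVeScheduler())
# images = pipe(batch_size=1, num_inference_steps=2_000).images
# Each timestep runs `correct_steps` Langevin corrector updates before the
# predictor step, and the final image is the clamped `prev_sample_mean`.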
| 720
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase : Dict = {'configuration_timm_backbone': ['TimmBackboneConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Union[str, Any] = ['TimmBackbone']
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
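# Note (standard _LazyModule behaviour): the indirection above defers the heavy
# torch import until an attribute is first touched, e.g.
# import transformers.models.timm_backbone as tb   # cheap
# tb.TimmBackbone                                  # triggers the real import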
| 47
| 0
|
'''simple docstring'''
def _a ( lowerCAmelCase_ = 10 ):
"""simple docstring"""
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or n < 0:
raise ValueError('''Invalid input''' )
_snake_case : Tuple = 10**n
_snake_case : Any = 28_433 * (pow(2 , 7_830_457 , lowerCAmelCase_ )) + 1
return str(number % modulus )
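# Note (worked reasoning, not in the original row): pow(2, 7_830_457, 10**n)
# performs modular exponentiation in O(log exponent) multiplications, so only
# the last n digits of 28433 * 2**7830457 + 1 are ever materialised.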
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(1_0) = }""")
| 721
|
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
UpperCAmelCase : Tuple = logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')
UpperCAmelCase : str = {
'base': AutoModel,
'sequence-classification': AutoModelForSequenceClassification,
'question-answering': AutoModelForQuestionAnswering,
'pretraining': AutoModelForPreTraining,
'token-classification': AutoModelForTokenClassification,
'language-modeling': AutoModelWithLMHead,
'summarization': AutoModelForSeqaSeqLM,
'translation': AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
UpperCAmelCase : Optional[Any] = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
UpperCAmelCase : Tuple = sorted(arg_to_scheduler.keys())
UpperCAmelCase : Optional[Any] = '{' + ', '.join(arg_to_scheduler_choices) + '}'
class lowerCamelCase (pl.LightningModule ):
def __init__( self , lowercase__ , lowercase__=None , lowercase__="base" , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ , ) -> Optional[int]:
"""simple docstring"""
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(lowercase__ )
_snake_case : Union[str, Any] = 0
_snake_case : int = Path(self.hparams.output_dir )
_snake_case : int = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
_snake_case : Tuple = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=lowercase__ , **lowercase__ , )
else:
_snake_case : PretrainedConfig = config
_snake_case : Optional[Any] = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(self.hparams , lowercase__ , lowercase__ ):
assert hasattr(self.config , lowercase__ ), F'''model config doesn\'t have a `{p}` attribute'''
setattr(self.config , lowercase__ , getattr(self.hparams , lowercase__ ) )
if tokenizer is None:
_snake_case : Optional[int] = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=lowercase__ , )
else:
_snake_case : PreTrainedTokenizer = tokenizer
_snake_case : Any = MODEL_MODES[mode]
if model is None:
_snake_case : List[Any] = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=lowercase__ , )
else:
_snake_case : Optional[Any] = model
def UpperCAmelCase_ ( self , *lowercase__ , **lowercase__ ) -> List[str]:
"""simple docstring"""
_snake_case : Dict = self.model_type.from_pretrained(*lowercase__ , **lowercase__ )
def UpperCAmelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_snake_case : Optional[int] = arg_to_scheduler[self.hparams.lr_scheduler]
_snake_case : Optional[int] = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
_snake_case : str = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1}
return scheduler
def UpperCAmelCase_ ( self ) -> int:
"""simple docstring"""
_snake_case : Any = self.model
_snake_case : List[Any] = ['''bias''', '''LayerNorm.weight''']
_snake_case : List[str] = [
{
'''params''': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check these named parameters
'''weight_decay''': self.hparams.weight_decay,
},
{
'''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'''weight_decay''': 0.0,
},
]
if self.hparams.adafactor:
_snake_case : Any = Adafactor(
lowercase__ , lr=self.hparams.learning_rate , scale_parameter=lowercase__ , relative_step=lowercase__ )
else:
_snake_case : List[str] = AdamW(
lowercase__ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
_snake_case : List[str] = optimizer
_snake_case : Any = self.get_lr_scheduler()
return [optimizer], [scheduler]
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Any:
"""simple docstring"""
return self.validation_step(lowercase__ , lowercase__ )
def UpperCAmelCase_ ( self , lowercase__ ) -> Tuple:
"""simple docstring"""
return self.validation_end(lowercase__ )
def UpperCAmelCase_ ( self ) -> int:
"""simple docstring"""
_snake_case : Any = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
_snake_case : Optional[int] = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def UpperCAmelCase_ ( self , lowercase__ ) -> Any:
"""simple docstring"""
if stage == "test":
_snake_case : Any = len(self.test_dataloader().dataset )
else:
_snake_case : Dict = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=lowercase__ )
_snake_case : Optional[int] = len(self.train_dataloader().dataset )
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ = False ) -> str:
"""simple docstring"""
raise NotImplementedError('''You must implement this for your task''' )
def UpperCAmelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
return self.train_loader
def UpperCAmelCase_ ( self ) -> Dict:
"""simple docstring"""
return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=lowercase__ )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=lowercase__ )
def UpperCAmelCase_ ( self , lowercase__ ) -> Optional[int]:
"""simple docstring"""
return os.path.join(
self.hparams.data_dir , '''cached_{}_{}_{}'''.format(
lowercase__ , list(filter(lowercase__ , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def UpperCAmelCase_ ( self , lowercase__ ) -> None:
"""simple docstring"""
_snake_case : Dict = self.output_dir.joinpath('''best_tfmr''' )
_snake_case : Tuple = self.step_count
self.model.save_pretrained(lowercase__ )
self.tokenizer.save_pretrained(lowercase__ )
@staticmethod
def UpperCAmelCase_ ( lowercase__ , lowercase__ ) -> Tuple:
"""simple docstring"""
parser.add_argument(
'''--model_name_or_path''' , default=lowercase__ , type=lowercase__ , required=lowercase__ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--config_name''' , default='''''' , type=lowercase__ , help='''Pretrained config name or path if not the same as model_name''' )
parser.add_argument(
'''--tokenizer_name''' , default=lowercase__ , type=lowercase__ , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument(
'''--cache_dir''' , default=str(Path(lowercase__ ).parent / '''test_run''' / '''cache''' ) , type=lowercase__ , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , )
parser.add_argument(
'''--encoder_layerdrop''' , type=lowercase__ , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--decoder_layerdrop''' , type=lowercase__ , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--dropout''' , type=lowercase__ , help='''Dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--attention_dropout''' , type=lowercase__ , help='''Attention dropout probability (Optional). Goes into model.config''' , )
parser.add_argument('''--learning_rate''' , default=5E-5 , type=lowercase__ , help='''The initial learning rate for Adam.''' )
parser.add_argument(
'''--lr_scheduler''' , default='''linear''' , choices=lowercase__ , metavar=lowercase__ , type=lowercase__ , help='''Learning rate scheduler''' , )
parser.add_argument('''--weight_decay''' , default=0.0 , type=lowercase__ , help='''Weight decay if we apply some.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=lowercase__ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--warmup_steps''' , default=0 , type=lowercase__ , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--num_workers''' , default=4 , type=lowercase__ , help='''kwarg passed to DataLoader''' )
parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=lowercase__ )
parser.add_argument('''--train_batch_size''' , default=32 , type=lowercase__ )
parser.add_argument('''--eval_batch_size''' , default=32 , type=lowercase__ )
parser.add_argument('''--adafactor''' , action='''store_true''' )
class lowerCamelCase (pl.Callback ):
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> str:
"""simple docstring"""
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class lowerCamelCase (pl.Callback ):
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> List[str]:
"""simple docstring"""
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(lowercase__ )
class lowerCamelCase (pl.Callback ):
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Any:
"""simple docstring"""
_snake_case : Any = trainer.lr_schedulers[0]['''scheduler''']
_snake_case : Optional[int] = {F'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(lowercase__ )
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> List[str]:
"""simple docstring"""
rank_zero_info('''***** Validation results *****''' )
_snake_case : Dict = trainer.callback_metrics
# Log results
for key in sorted(lowercase__ ):
if key not in ["log", "progress_bar"]:
rank_zero_info('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) )
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Dict:
"""simple docstring"""
rank_zero_info('''***** Test results *****''' )
_snake_case : Dict = trainer.callback_metrics
# Log and save results to file
_snake_case : str = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' )
with open(lowercase__ , '''w''' ) as writer:
for key in sorted(lowercase__ ):
if key not in ["log", "progress_bar"]:
rank_zero_info('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) )
writer.write('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
parser.add_argument(
'''--output_dir''' , default=str(Path(lowerCAmelCase_ ).parent / '''test_run''' / '''model_checkpoints''' ) , type=lowerCAmelCase_ , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=lowerCAmelCase_ , default='''O2''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=lowerCAmelCase_ )
parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=lowerCAmelCase_ , help='''Max gradient norm''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' )
parser.add_argument(
'''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=lowerCAmelCase_ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--seed''' , type=lowerCAmelCase_ , default=42 , help='''random seed for initialization''' )
parser.add_argument(
'''--data_dir''' , default=str(Path(lowerCAmelCase_ ).parent / '''test_run''' / '''dummy-train-data''' ) , type=lowerCAmelCase_ , help='''The input data dir. Should contain the training files for the CoNLL-2003 NER task.''' , )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=[] , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ):
"""simple docstring"""
pl.seed_everything(args.seed )
# init model
_snake_case : Union[str, Any] = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=lowerCAmelCase_ )
# add custom checkpoints
if checkpoint_callback is None:
_snake_case : Any = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(lowerCAmelCase_ )
if logging_callback is None:
_snake_case : str = LoggingCallback()
_snake_case : Tuple = {}
if args.fp16:
_snake_case : Union[str, Any] = 16
if args.gpus > 1:
_snake_case : Optional[Any] = '''auto'''
_snake_case : Tuple = '''ddp'''
_snake_case : Optional[Any] = args.accumulate_grad_batches
_snake_case : Tuple = None
_snake_case : str = '''auto'''
_snake_case : int = pl.Trainer.from_argparse_args(
lowerCAmelCase_ , weights_summary=lowerCAmelCase_ , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=lowerCAmelCase_ , val_check_interval=1 , num_sanity_val_steps=2 , **lowerCAmelCase_ , )
if args.do_train:
trainer.fit(lowerCAmelCase_ )
else:
print('''RAG modeling tests with new set functions successfully executed!''' )
return trainer
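# Hedged wiring sketch (the task subclass and the de-obfuscated helper names
# `add_generic_args` / `generic_train` are assumptions about the original file):
# parser = argparse.ArgumentParser()
# add_generic_args(parser, os.getcwd())            # module-level helper above
# MyTaskModel.add_model_specific_args(parser, os.getcwd())
# args = parser.parse_args()
# model = MyTaskModel(args)                        # subclass implementing get_dataloader()
# trainer = generic_train(model, args)             # the function defined above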
| 47
| 0
|
'''simple docstring'''
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
UpperCAmelCase : List[str] = logging.getLogger(__name__)
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Optional[int] = np.argmax(lowerCAmelCase_ , axis=1 )
return np.sum(outputs == labels )
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
with open(lowerCAmelCase_ , encoding='''utf_8''' ) as f:
_snake_case : Dict = csv.reader(lowerCAmelCase_ )
_snake_case : Tuple = []
next(lowerCAmelCase_ ) # skip the first line
for line in tqdm(lowerCAmelCase_ ):
output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Union[str, Any] = []
for dataset in encoded_datasets:
_snake_case : List[Any] = len(lowerCAmelCase_ )
_snake_case : int = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
_snake_case : Optional[Any] = np.zeros((n_batch, 2) , dtype=np.intaa )
_snake_case : Optional[Any] = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa )
_snake_case : int = np.zeros((n_batch,) , dtype=np.intaa )
for i, (story, conta, conta, mc_label) in enumerate(lowerCAmelCase_ ):
_snake_case : Any = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
_snake_case : List[Any] = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
_snake_case : List[Any] = with_conta
_snake_case : Dict = with_conta
_snake_case : str = len(lowerCAmelCase_ ) - 1
_snake_case : List[Any] = len(lowerCAmelCase_ ) - 1
_snake_case : Optional[Any] = with_conta
_snake_case : List[Any] = with_conta
_snake_case : Optional[Any] = mc_label
_snake_case : Optional[Any] = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(lowerCAmelCase_ ) for t in all_inputs ) )
return tensor_datasets
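# Shape note (derived from the code above): each dataset becomes a 4-tuple of
# tensors -- input_ids (n_batch, 2, input_len), mc_token_ids (n_batch, 2),
# lm_labels (n_batch, 2, input_len) filled with -100 outside the text, and
# mc_labels (n_batch,) -- the 2 axis holding the two candidate continuations.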
def _a ( ):
"""simple docstring"""
_snake_case : List[Any] = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=lowerCAmelCase_ , default='''openai-gpt''' , help='''pretrained model name''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''' )
parser.add_argument(
'''--output_dir''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument('''--train_dataset''' , type=lowerCAmelCase_ , default='''''' )
parser.add_argument('''--eval_dataset''' , type=lowerCAmelCase_ , default='''''' )
parser.add_argument('''--seed''' , type=lowerCAmelCase_ , default=42 )
parser.add_argument('''--num_train_epochs''' , type=lowerCAmelCase_ , default=3 )
parser.add_argument('''--train_batch_size''' , type=lowerCAmelCase_ , default=8 )
parser.add_argument('''--eval_batch_size''' , type=lowerCAmelCase_ , default=16 )
parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=lowerCAmelCase_ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , type=lowerCAmelCase_ , default=1 )
parser.add_argument(
'''--max_steps''' , default=-1 , type=lowerCAmelCase_ , help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
) , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=lowerCAmelCase_ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--learning_rate''' , type=lowerCAmelCase_ , default=6.25E-5 )
parser.add_argument('''--warmup_steps''' , default=0 , type=lowerCAmelCase_ , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--lr_schedule''' , type=lowerCAmelCase_ , default='''warmup_linear''' )
parser.add_argument('''--weight_decay''' , type=lowerCAmelCase_ , default=0.01 )
parser.add_argument('''--lm_coef''' , type=lowerCAmelCase_ , default=0.9 )
parser.add_argument('''--n_valid''' , type=lowerCAmelCase_ , default=374 )
parser.add_argument('''--server_ip''' , type=lowerCAmelCase_ , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=lowerCAmelCase_ , default='''''' , help='''Can be used for distant debugging.''' )
_snake_case : Optional[int] = parser.parse_args()
print(lowerCAmelCase_ )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCAmelCase_ )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
_snake_case : Optional[Any] = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
_snake_case : Dict = torch.cuda.device_count()
logger.info('''device: {}, n_gpu {}'''.format(lowerCAmelCase_ , lowerCAmelCase_ ) )
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
_snake_case : Optional[Any] = ['''_start_''', '''_delimiter_''', '''_classify_''']
_snake_case : Optional[int] = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(lowerCAmelCase_ )
_snake_case : Optional[Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
_snake_case : List[str] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(lowerCAmelCase_ ) )
model.to(lowerCAmelCase_ )
# Load and encode the datasets
def tokenize_and_encode(lowerCAmelCase_ ):
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(lowerCAmelCase_ ) )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return obj
return [tokenize_and_encode(lowerCAmelCase_ ) for o in obj]
logger.info('''Encoding dataset...''' )
_snake_case : Optional[Any] = load_rocstories_dataset(args.train_dataset )
_snake_case : List[Any] = load_rocstories_dataset(args.eval_dataset )
_snake_case : Dict = (train_dataset, eval_dataset)
_snake_case : Optional[Any] = tokenize_and_encode(lowerCAmelCase_ )
# Compute the max input length for the Transformer
_snake_case : Tuple = model.config.n_positions // 2 - 2
_snake_case : Optional[Any] = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
_snake_case : Optional[int] = min(lowerCAmelCase_ , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
_snake_case : int = pre_process_datasets(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , *lowerCAmelCase_ )
_snake_case , _snake_case : Dict = tensor_datasets[0], tensor_datasets[1]
_snake_case : str = TensorDataset(*lowerCAmelCase_ )
_snake_case : List[Any] = RandomSampler(lowerCAmelCase_ )
_snake_case : List[Any] = DataLoader(lowerCAmelCase_ , sampler=lowerCAmelCase_ , batch_size=args.train_batch_size )
_snake_case : str = TensorDataset(*lowerCAmelCase_ )
_snake_case : Any = SequentialSampler(lowerCAmelCase_ )
_snake_case : str = DataLoader(lowerCAmelCase_ , sampler=lowerCAmelCase_ , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
_snake_case : Optional[int] = args.max_steps
_snake_case : Optional[int] = args.max_steps // (len(lowerCAmelCase_ ) // args.gradient_accumulation_steps) + 1
else:
_snake_case : Dict = len(lowerCAmelCase_ ) // args.gradient_accumulation_steps * args.num_train_epochs
_snake_case : Union[str, Any] = list(model.named_parameters() )
_snake_case : str = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
_snake_case : Union[str, Any] = [
{
'''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'''weight_decay''': args.weight_decay,
},
{'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0},
]
_snake_case : Any = AdamW(lowerCAmelCase_ , lr=args.learning_rate , eps=args.adam_epsilon )
_snake_case : str = get_linear_schedule_with_warmup(
lowerCAmelCase_ , num_warmup_steps=args.warmup_steps , num_training_steps=lowerCAmelCase_ )
if args.do_train:
_snake_case , _snake_case , _snake_case : Optional[int] = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='''Epoch''' ):
_snake_case : str = 0
_snake_case : List[str] = 0
_snake_case : Tuple = tqdm(lowerCAmelCase_ , desc='''Training''' )
for step, batch in enumerate(lowerCAmelCase_ ):
_snake_case : Any = tuple(t.to(lowerCAmelCase_ ) for t in batch )
_snake_case , _snake_case , _snake_case , _snake_case : Optional[Any] = batch
_snake_case : Union[str, Any] = model(lowerCAmelCase_ , mc_token_ids=lowerCAmelCase_ , lm_labels=lowerCAmelCase_ , mc_labels=lowerCAmelCase_ )
_snake_case : List[Any] = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
_snake_case : Optional[Any] = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
_snake_case : int = '''Training loss: {:.2e} lr: {:.2e}'''.format(lowerCAmelCase_ , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
_snake_case : Optional[int] = model.module if hasattr(lowerCAmelCase_ , '''module''' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
_snake_case : Dict = os.path.join(args.output_dir , lowerCAmelCase_ )
_snake_case : Dict = os.path.join(args.output_dir , lowerCAmelCase_ )
torch.save(model_to_save.state_dict() , lowerCAmelCase_ )
model_to_save.config.to_json_file(lowerCAmelCase_ )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
_snake_case : Dict = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
_snake_case : Optional[int] = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(lowerCAmelCase_ )
if args.do_eval:
model.eval()
_snake_case , _snake_case : Tuple = 0, 0
_snake_case , _snake_case : str = 0, 0
for batch in tqdm(lowerCAmelCase_ , desc='''Evaluating''' ):
_snake_case : Any = tuple(t.to(lowerCAmelCase_ ) for t in batch )
_snake_case , _snake_case , _snake_case , _snake_case : Dict = batch
with torch.no_grad():
_snake_case : List[Any] = model(
lowerCAmelCase_ , mc_token_ids=lowerCAmelCase_ , lm_labels=lowerCAmelCase_ , mc_labels=lowerCAmelCase_ )
_snake_case : int = mc_logits.detach().cpu().numpy()
_snake_case : Any = mc_labels.to('''cpu''' ).numpy()
_snake_case : Optional[Any] = accuracy(lowerCAmelCase_ , lowerCAmelCase_ )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
_snake_case : Optional[int] = eval_loss / nb_eval_steps
_snake_case : Optional[int] = eval_accuracy / nb_eval_examples
_snake_case : List[Any] = tr_loss / nb_tr_steps if args.do_train else None
_snake_case : Tuple = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
_snake_case : str = os.path.join(args.output_dir , '''eval_results.txt''' )
with open(lowerCAmelCase_ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , lowerCAmelCase_ , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
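# Hedged CLI sketch (the script name is an assumption; the flags come from the
# parser defined above):
# python run_openai_gpt.py --model_name openai-gpt --do_train --do_eval \
#     --train_dataset train.csv --eval_dataset val.csv --output_dir out/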
| 700
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : Dict = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class lowerCamelCase (a__ ):
_lowercase : List[str] = """sew-d"""
def __init__( self , lowercase__=32 , lowercase__=768 , lowercase__=12 , lowercase__=12 , lowercase__=3_072 , lowercase__=2 , lowercase__=512 , lowercase__=256 , lowercase__=True , lowercase__=True , lowercase__=("p2c", "c2p") , lowercase__="layer_norm" , lowercase__="gelu_python" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.1 , lowercase__=0.02 , lowercase__=1E-7 , lowercase__=1E-5 , lowercase__="group" , lowercase__="gelu" , lowercase__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowercase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase__=False , lowercase__=128 , lowercase__=16 , lowercase__=True , lowercase__=0.05 , lowercase__=10 , lowercase__=2 , lowercase__=0.0 , lowercase__=10 , lowercase__=0 , lowercase__="mean" , lowercase__=False , lowercase__=False , lowercase__=256 , lowercase__=0 , lowercase__=1 , lowercase__=2 , **lowercase__ , ) -> Dict:
"""simple docstring"""
super().__init__(**lowercase__ , pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ )
_snake_case : List[str] = hidden_size
_snake_case : Optional[Any] = feat_extract_norm
_snake_case : Tuple = feat_extract_activation
_snake_case : Tuple = list(lowercase__ )
_snake_case : Any = list(lowercase__ )
_snake_case : Any = list(lowercase__ )
_snake_case : Any = conv_bias
_snake_case : List[Any] = num_conv_pos_embeddings
_snake_case : Any = num_conv_pos_embedding_groups
_snake_case : Union[str, Any] = len(self.conv_dim )
_snake_case : Optional[Any] = num_hidden_layers
_snake_case : Optional[int] = intermediate_size
_snake_case : Any = squeeze_factor
_snake_case : Optional[Any] = max_position_embeddings
_snake_case : Tuple = position_buckets
_snake_case : Tuple = share_att_key
_snake_case : Any = relative_attention
_snake_case : Optional[int] = norm_rel_ebd
_snake_case : Optional[Any] = list(lowercase__ )
_snake_case : List[Any] = hidden_act
_snake_case : List[Any] = num_attention_heads
_snake_case : Dict = hidden_dropout
_snake_case : Tuple = attention_dropout
_snake_case : Union[str, Any] = activation_dropout
_snake_case : List[Any] = feat_proj_dropout
_snake_case : Optional[int] = final_dropout
_snake_case : Optional[Any] = layer_norm_eps
_snake_case : Dict = feature_layer_norm_eps
_snake_case : List[Any] = initializer_range
_snake_case : Dict = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_snake_case : Union[str, Any] = apply_spec_augment
_snake_case : Any = mask_time_prob
_snake_case : List[str] = mask_time_length
_snake_case : Dict = mask_time_min_masks
_snake_case : Union[str, Any] = mask_feature_prob
_snake_case : Tuple = mask_feature_length
_snake_case : Union[str, Any] = mask_feature_min_masks
# ctc loss
_snake_case : Optional[Any] = ctc_loss_reduction
_snake_case : Optional[Any] = ctc_zero_infinity
# sequence classification
_snake_case : List[Any] = use_weighted_layer_sum
_snake_case : Any = classifier_proj_size
@property
def UpperCAmelCase_ ( self ) -> Any:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
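# Worked example (from the defaults above): the property multiplies the conv
# strides, so (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) yields 5 * 2**6 == 320,
# i.e. one encoder frame per 320 raw audio samples.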
| 47
| 0
|
'''simple docstring'''
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
UpperCAmelCase : List[str] = re.compile(R'\b(a|an|the)\b', re.UNICODE)
UpperCAmelCase : Dict = None
def _a ( ):
"""simple docstring"""
_snake_case : int = argparse.ArgumentParser('''Official evaluation script for SQuAD version 2.0.''' )
parser.add_argument('''data_file''' , metavar='''data.json''' , help='''Input data JSON file.''' )
parser.add_argument('''pred_file''' , metavar='''pred.json''' , help='''Model predictions.''' )
parser.add_argument(
'''--out-file''' , '''-o''' , metavar='''eval.json''' , help='''Write accuracy metrics to file (default is stdout).''' )
parser.add_argument(
'''--na-prob-file''' , '''-n''' , metavar='''na_prob.json''' , help='''Model estimates of probability of no answer.''' )
parser.add_argument(
'''--na-prob-thresh''' , '''-t''' , type=lowerCAmelCase_ , default=1.0 , help='''Predict "" if no-answer probability exceeds this (default = 1.0).''' , )
parser.add_argument(
'''--out-image-dir''' , '''-p''' , metavar='''out_images''' , default=lowerCAmelCase_ , help='''Save precision-recall curves to directory.''' )
parser.add_argument('''--verbose''' , '''-v''' , action='''store_true''' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : List[str] = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
_snake_case : Union[str, Any] = bool(qa['''answers''']['''text'''] )
return qid_to_has_ans
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
def remove_articles(lowerCAmelCase_ ):
return ARTICLES_REGEX.sub(''' ''' , lowerCAmelCase_ )
def white_space_fix(lowerCAmelCase_ ):
return " ".join(text.split() )
def remove_punc(lowerCAmelCase_ ):
_snake_case : List[str] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(lowerCAmelCase_ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCAmelCase_ ) ) ) )
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
if not s:
return []
return normalize_answer(lowerCAmelCase_ ).split()
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
return int(normalize_answer(lowerCAmelCase_ ) == normalize_answer(lowerCAmelCase_ ) )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Tuple = get_tokens(lowerCAmelCase_ )
_snake_case : int = get_tokens(lowerCAmelCase_ )
_snake_case : str = collections.Counter(lowerCAmelCase_ ) & collections.Counter(lowerCAmelCase_ )
_snake_case : List[Any] = sum(common.values() )
if len(lowerCAmelCase_ ) == 0 or len(lowerCAmelCase_ ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
_snake_case : Union[str, Any] = 1.0 * num_same / len(lowerCAmelCase_ )
_snake_case : Any = 1.0 * num_same / len(lowerCAmelCase_ )
_snake_case : List[Any] = (2 * precision * recall) / (precision + recall)
return fa
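# Worked example (not in the original row): gold "six feet under" vs. prediction
# "six feet" share 2 tokens, so precision = 2/2, recall = 2/3 and
# F1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.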
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : int = {}
_snake_case : Optional[int] = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
_snake_case : str = qa['''id''']
_snake_case : List[Any] = [t for t in qa['''answers''']['''text'''] if normalize_answer(lowerCAmelCase_ )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
_snake_case : Any = ['''''']
if qid not in preds:
print(f'''Missing prediction for {qid}''' )
continue
_snake_case : Any = preds[qid]
# Take max over all gold answers
_snake_case : Union[str, Any] = max(compute_exact(lowerCAmelCase_ , lowerCAmelCase_ ) for a in gold_answers )
_snake_case : Dict = max(compute_fa(lowerCAmelCase_ , lowerCAmelCase_ ) for a in gold_answers )
return exact_scores, fa_scores
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : int = {}
for qid, s in scores.items():
_snake_case : Any = na_probs[qid] > na_prob_thresh
if pred_na:
_snake_case : Optional[Any] = float(not qid_to_has_ans[qid] )
else:
_snake_case : int = s
return new_scores
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None ):
"""simple docstring"""
if not qid_list:
_snake_case : List[Any] = len(lowerCAmelCase_ )
return collections.OrderedDict(
[
('''exact''', 100.0 * sum(exact_scores.values() ) / total),
('''f1''', 100.0 * sum(fa_scores.values() ) / total),
('''total''', total),
] )
else:
_snake_case : Tuple = len(lowerCAmelCase_ )
return collections.OrderedDict(
[
('''exact''', 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
('''f1''', 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
('''total''', total),
] )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
for k in new_eval:
_snake_case : Dict = new_eval[k]
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
plt.step(lowerCAmelCase_ , lowerCAmelCase_ , color='''b''' , alpha=0.2 , where='''post''' )
plt.fill_between(lowerCAmelCase_ , lowerCAmelCase_ , step='''post''' , alpha=0.2 , color='''b''' )
plt.xlabel('''Recall''' )
plt.ylabel('''Precision''' )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(lowerCAmelCase_ )
plt.savefig(lowerCAmelCase_ )
plt.clf()
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None ):
"""simple docstring"""
_snake_case : Union[str, Any] = sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : na_probs[k] )
_snake_case : Any = 0.0
_snake_case : Optional[Any] = 1.0
_snake_case : str = 0.0
_snake_case : List[str] = [1.0]
_snake_case : str = [0.0]
_snake_case : Optional[Any] = 0.0
for i, qid in enumerate(lowerCAmelCase_ ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
_snake_case : str = true_pos / float(i + 1 )
_snake_case : List[str] = true_pos / float(lowerCAmelCase_ )
if i == len(lowerCAmelCase_ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(lowerCAmelCase_ )
recalls.append(lowerCAmelCase_ )
if out_image:
plot_pr_curve(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return {"ap": 100.0 * avg_prec}
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
if out_image_dir and not os.path.exists(lowerCAmelCase_ ):
os.makedirs(lowerCAmelCase_ )
_snake_case : str = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
_snake_case : List[str] = make_precision_recall_eval(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , out_image=os.path.join(lowerCAmelCase_ , '''pr_exact.png''' ) , title='''Precision-Recall curve for Exact Match score''' , )
_snake_case : Any = make_precision_recall_eval(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , out_image=os.path.join(lowerCAmelCase_ , '''pr_f1.png''' ) , title='''Precision-Recall curve for F1 score''' , )
_snake_case : Any = {k: float(lowerCAmelCase_ ) for k, v in qid_to_has_ans.items()}
_snake_case : Optional[int] = make_precision_recall_eval(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , out_image=os.path.join(lowerCAmelCase_ , '''pr_oracle.png''' ) , title='''Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)''' , )
merge_eval(lowerCAmelCase_ , lowerCAmelCase_ , '''pr_exact''' )
merge_eval(lowerCAmelCase_ , lowerCAmelCase_ , '''pr_f1''' )
merge_eval(lowerCAmelCase_ , lowerCAmelCase_ , '''pr_oracle''' )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
if not qid_list:
return
_snake_case : int = [na_probs[k] for k in qid_list]
_snake_case : List[Any] = np.ones_like(lowerCAmelCase_ ) / float(len(lowerCAmelCase_ ) )
plt.hist(lowerCAmelCase_ , weights=lowerCAmelCase_ , bins=20 , range=(0.0, 1.0) )
plt.xlabel('''Model probability of no-answer''' )
plt.ylabel('''Proportion of dataset''' )
plt.title(f'''Histogram of no-answer probability: {name}''' )
plt.savefig(os.path.join(lowerCAmelCase_ , f'''na_prob_hist_{name}.png''' ) )
plt.clf()
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Tuple = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
_snake_case : str = num_no_ans
_snake_case : int = cur_score
_snake_case : Union[str, Any] = 0.0
_snake_case : List[str] = sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : na_probs[k] )
for i, qid in enumerate(lowerCAmelCase_ ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
_snake_case : Optional[Any] = scores[qid]
else:
if preds[qid]:
_snake_case : Optional[Any] = -1
else:
_snake_case : str = 0
cur_score += diff
if cur_score > best_score:
_snake_case : int = cur_score
_snake_case : int = na_probs[qid]
return 100.0 * best_score / len(lowerCAmelCase_ ), best_thresh
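# Note (derived from the loop above): candidates are visited in order of
# increasing no-answer probability, starting from the score of answering
# nothing; the threshold that maximises the running score is returned together
# with the best score rescaled to a percentage.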
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case , _snake_case : Dict = find_best_thresh(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case , _snake_case : str = find_best_thresh(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case : str = best_exact
_snake_case : Any = exact_thresh
_snake_case : Union[str, Any] = best_fa
_snake_case : List[str] = fa_thresh
def _a ( ):
"""simple docstring"""
with open(OPTS.data_file ) as f:
_snake_case : str = json.load(lowerCAmelCase_ )
_snake_case : List[str] = dataset_json['''data''']
with open(OPTS.pred_file ) as f:
_snake_case : List[Any] = json.load(lowerCAmelCase_ )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
_snake_case : List[Any] = json.load(lowerCAmelCase_ )
else:
_snake_case : List[Any] = {k: 0.0 for k in preds}
_snake_case : int = make_qid_to_has_ans(lowerCAmelCase_ ) # maps qid to True/False
_snake_case : Optional[int] = [k for k, v in qid_to_has_ans.items() if v]
_snake_case : Any = [k for k, v in qid_to_has_ans.items() if not v]
_snake_case , _snake_case : Union[str, Any] = get_raw_scores(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case : Union[str, Any] = apply_no_ans_threshold(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , OPTS.na_prob_thresh )
_snake_case : Dict = apply_no_ans_threshold(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , OPTS.na_prob_thresh )
_snake_case : Any = make_eval_dict(lowerCAmelCase_ , lowerCAmelCase_ )
if has_ans_qids:
_snake_case : Optional[int] = make_eval_dict(lowerCAmelCase_ , lowerCAmelCase_ , qid_list=lowerCAmelCase_ )
merge_eval(lowerCAmelCase_ , lowerCAmelCase_ , '''HasAns''' )
if no_ans_qids:
_snake_case : int = make_eval_dict(lowerCAmelCase_ , lowerCAmelCase_ , qid_list=lowerCAmelCase_ )
merge_eval(lowerCAmelCase_ , lowerCAmelCase_ , '''NoAns''' )
if OPTS.na_prob_file:
find_all_best_thresh(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , OPTS.out_image_dir )
histogram_na_prob(lowerCAmelCase_ , lowerCAmelCase_ , OPTS.out_image_dir , '''hasAns''' )
histogram_na_prob(lowerCAmelCase_ , lowerCAmelCase_ , OPTS.out_image_dir , '''noAns''' )
if OPTS.out_file:
with open(OPTS.out_file , '''w''' ) as f:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
else:
print(json.dumps(lowerCAmelCase_ , indent=2 ) )
if __name__ == "__main__":
UpperCAmelCase : Any = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
| 701
|
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """simple docstring"""
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    """simple docstring"""
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 1_0_0  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    'No of Comparisons for 100 elements selected from a standard normal distribution '
    'is :'
)
print(z)
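# Minimal sanity check (my addition; the names `sample`/`cmps` are mine): sort a
# small array in place with the helpers above, confirm the ordering, and report the
# comparison count returned by the sort.
sample = np.array([3, 1, 4, 1, 5, 9, 2, 6])
cmps = _in_place_quick_sort(sample, 0, len(sample) - 1)
assert all(sample[i] <= sample[i + 1] for i in range(len(sample) - 1))
print('sample sorted:', sample, 'comparisons:', cmps)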
| 47
| 0
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : Dict = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class lowerCamelCase (a__ ):
_lowercase : List[str] = """sew-d"""
def __init__( self , lowercase__=32 , lowercase__=768 , lowercase__=12 , lowercase__=12 , lowercase__=3_072 , lowercase__=2 , lowercase__=512 , lowercase__=256 , lowercase__=True , lowercase__=True , lowercase__=("p2c", "c2p") , lowercase__="layer_norm" , lowercase__="gelu_python" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.1 , lowercase__=0.02 , lowercase__=1E-7 , lowercase__=1E-5 , lowercase__="group" , lowercase__="gelu" , lowercase__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowercase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase__=False , lowercase__=128 , lowercase__=16 , lowercase__=True , lowercase__=0.05 , lowercase__=10 , lowercase__=2 , lowercase__=0.0 , lowercase__=10 , lowercase__=0 , lowercase__="mean" , lowercase__=False , lowercase__=False , lowercase__=256 , lowercase__=0 , lowercase__=1 , lowercase__=2 , **lowercase__ , ) -> Dict:
"""simple docstring"""
super().__init__(**lowercase__ , pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ )
_snake_case : List[str] = hidden_size
_snake_case : Optional[Any] = feat_extract_norm
_snake_case : Tuple = feat_extract_activation
_snake_case : Tuple = list(lowercase__ )
_snake_case : Any = list(lowercase__ )
_snake_case : Any = list(lowercase__ )
_snake_case : Any = conv_bias
_snake_case : List[Any] = num_conv_pos_embeddings
_snake_case : Any = num_conv_pos_embedding_groups
_snake_case : Union[str, Any] = len(self.conv_dim )
_snake_case : Optional[Any] = num_hidden_layers
_snake_case : Optional[int] = intermediate_size
_snake_case : Any = squeeze_factor
_snake_case : Optional[Any] = max_position_embeddings
_snake_case : Tuple = position_buckets
_snake_case : Tuple = share_att_key
_snake_case : Any = relative_attention
_snake_case : Optional[int] = norm_rel_ebd
_snake_case : Optional[Any] = list(lowercase__ )
_snake_case : List[Any] = hidden_act
_snake_case : List[Any] = num_attention_heads
_snake_case : Dict = hidden_dropout
_snake_case : Tuple = attention_dropout
_snake_case : Union[str, Any] = activation_dropout
_snake_case : List[Any] = feat_proj_dropout
_snake_case : Optional[int] = final_dropout
_snake_case : Optional[Any] = layer_norm_eps
_snake_case : Dict = feature_layer_norm_eps
_snake_case : List[Any] = initializer_range
_snake_case : Dict = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_snake_case : Union[str, Any] = apply_spec_augment
_snake_case : Any = mask_time_prob
_snake_case : List[str] = mask_time_length
_snake_case : Dict = mask_time_min_masks
_snake_case : Union[str, Any] = mask_feature_prob
_snake_case : Tuple = mask_feature_length
_snake_case : Union[str, Any] = mask_feature_min_masks
# ctc loss
_snake_case : Optional[Any] = ctc_loss_reduction
_snake_case : Optional[Any] = ctc_zero_infinity
# sequence classification
_snake_case : List[Any] = use_weighted_layer_sum
_snake_case : Any = classifier_proj_size
@property
def UpperCAmelCase_ ( self ) -> Any:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
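# Standalone illustration of the property above (my addition; the strides are the
# `__init__` defaults): the reduce multiplies every conv stride together, giving the
# total downsampling factor between raw audio samples and encoder frames.
if __name__ == "__main__":
    _strides = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
    print(functools.reduce(operator.mul, _strides, 1))  # 5 * 2**6 = 320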
| 702
|
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 47
| 0
|
'''simple docstring'''
def is_balanced(s):
    """simple docstring"""
    stack = []
    open_brackets = set({'''(''', '''[''', '''{'''})
    closed_brackets = set({''')''', ''']''', '''}'''})
    open_to_closed = {'''{''': '''}''', '''[''': ''']''', '''(''': ''')'''}
    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(stack) == 0
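# Illustrative checks (my addition):
assert is_balanced('{[()]}')
assert not is_balanced('{[(])}')
assert not is_balanced('((')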
def main():
    """simple docstring"""
    s = input('''Enter sequence of brackets: ''')
    if is_balanced(s):
        print(s, '''is balanced''')
    else:
        print(s, '''is not balanced''')
if __name__ == "__main__":
main()
| 703
|
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def _a ( ):
"""simple docstring"""
_snake_case : List[Any] = ArgumentParser('''Transformers CLI tool''' , usage='''transformers-cli <command> [<args>]''' )
_snake_case : List[str] = parser.add_subparsers(help='''transformers-cli command helpers''' )
# Register commands
ConvertCommand.register_subcommand(lowerCAmelCase_ )
DownloadCommand.register_subcommand(lowerCAmelCase_ )
EnvironmentCommand.register_subcommand(lowerCAmelCase_ )
RunCommand.register_subcommand(lowerCAmelCase_ )
ServeCommand.register_subcommand(lowerCAmelCase_ )
UserCommands.register_subcommand(lowerCAmelCase_ )
AddNewModelCommand.register_subcommand(lowerCAmelCase_ )
AddNewModelLikeCommand.register_subcommand(lowerCAmelCase_ )
LfsCommands.register_subcommand(lowerCAmelCase_ )
PTtoTFCommand.register_subcommand(lowerCAmelCase_ )
# Let's go
_snake_case : str = parser.parse_args()
if not hasattr(lowerCAmelCase_ , '''func''' ):
parser.print_help()
exit(1 )
# Run
_snake_case : Union[str, Any] = args.func(lowerCAmelCase_ )
service.run()
if __name__ == "__main__":
main()
| 47
| 0
|
def solve_simultaneous_equations(equation1, equation2):
    """simple docstring"""
    if not len(equation1) == len(equation2) == 3:
        raise ValueError('''Please enter a valid equation.''')
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('''Both a & b of two equations can\'t be zero.''')
    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('''Infinite solutions. (Consistent system)''')
        else:
            raise ValueError('''No solution. (Inconsistent system)''')
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
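if __name__ == "__main__":
    # Worked example (my addition): x + 2y = 3 and 2x + y = 3 intersect at (1.0, 1.0).
    print(solve_simultaneous_equations([1, 2, 3], [2, 1, 3]))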
| 704
|
'''simple docstring'''
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    """simple docstring"""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1_000) -> int:
    """simple docstring"""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
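# Sanity check (my addition): the first Fibonacci term with 3 digits is F(12) = 144,
# so solution(3) should be 12.
assert solution(3) == 12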
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 47
| 0
|
'''simple docstring'''
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """simple docstring"""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    """simple docstring"""
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    """simple docstring"""
    value = os.environ.get(key, str(default))
    return value
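if __name__ == "__main__":
    # Usage sketch (my addition; the env var names are hypothetical):
    os.environ["DEMO_FLAG"] = "yes"
    print(parse_flag_from_env("DEMO_FLAG"))             # True ("yes" parses truthy)
    print(get_int_from_env(["UNSET_A", "UNSET_B"], 7))  # 7, the fallback default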
| 705
|
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
UpperCAmelCase : str = logging.getLogger(__name__)
UpperCAmelCase : Dict = 5_0 # max width of layer names
UpperCAmelCase : Union[str, Any] = 7_0 # max width of quantizer names
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Dict = parser.add_argument_group('''quant_trainer arguments''' )
group.add_argument('''--wprec''' , type=lowerCAmelCase_ , default=8 , help='''weight precision''' )
group.add_argument('''--aprec''' , type=lowerCAmelCase_ , default=8 , help='''activation precision''' )
group.add_argument('''--quant-per-tensor''' , action='''store_true''' , help='''per tensor weight scaling''' )
group.add_argument('''--quant-disable''' , action='''store_true''' , help='''disable all quantizers''' )
group.add_argument('''--quant-disable-embeddings''' , action='''store_true''' , help='''disable all embeddings quantizers''' )
group.add_argument('''--quant-disable-keyword''' , type=lowerCAmelCase_ , nargs='''+''' , help='''disable quantizers by keyword''' )
group.add_argument('''--quant-disable-layer-module''' , type=lowerCAmelCase_ , help='''disable quantizers by keyword under layer.''' )
group.add_argument('''--quant-enable-layer-module''' , type=lowerCAmelCase_ , help='''enable quantizers by keyword under layer''' )
group.add_argument('''--calibrator''' , default='''max''' , help='''which quantization range calibrator to use''' )
group.add_argument('''--percentile''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , help='''percentile for PercentileCalibrator''' )
group.add_argument('''--fuse-qkv''' , action='''store_true''' , help='''use the same scale factor for qkv''' )
group.add_argument('''--clip-gelu''' , metavar='''N''' , type=lowerCAmelCase_ , help='''clip gelu output maximum value to N''' )
group.add_argument(
'''--recalibrate-weights''' , action='''store_true''' , help=(
'''recalibrate weight amaxes by taking the max of the weights.'''
''' amaxes will be computed with the current quantization granularity (axis).'''
) , )
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
if args.calibrator == "max":
_snake_case : Optional[int] = '''max'''
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError('''Specify --percentile when using percentile calibrator''' )
_snake_case : Tuple = '''histogram'''
elif args.calibrator == "mse":
_snake_case : int = '''histogram'''
else:
raise ValueError(f'''Invalid calibrator {args.calibrator}''' )
_snake_case : Tuple = QuantDescriptor(num_bits=args.aprec , calib_method=lowerCAmelCase_ )
_snake_case : str = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(lowerCAmelCase_ )
quant_nn.QuantLinear.set_default_quant_desc_weight(lowerCAmelCase_ )
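# Hedged wiring sketch (commented out; `add_arguments` and `set_default_quantizers`
# are assumed public names for the two helpers above):
#   parser = argparse.ArgumentParser()
#   add_arguments(parser)
#   args = parser.parse_args(['--aprec', '8', '--wprec', '8', '--quant-per-tensor'])
#   set_default_quantizers(args)  # installs the QuantDescriptor defaults on QuantLinear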
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=False ):
"""simple docstring"""
logger.info('''Configuring Model for Quantization''' )
logger.info(f'''using quantization package {pytorch_quantization.__file__}''' )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(lowerCAmelCase_ , ['''embeddings'''] , which='''weight''' , _disabled=lowerCAmelCase_ )
if args.quant_disable:
set_quantizer_by_name(lowerCAmelCase_ , [''''''] , _disabled=lowerCAmelCase_ )
if args.quant_disable_keyword:
set_quantizer_by_name(lowerCAmelCase_ , args.quant_disable_keyword , _disabled=lowerCAmelCase_ )
if args.quant_disable_layer_module:
set_quantizer_by_name(lowerCAmelCase_ , [R'''layer.\d+.''' + args.quant_disable_layer_module] , _disabled=lowerCAmelCase_ )
if args.quant_enable_layer_module:
set_quantizer_by_name(lowerCAmelCase_ , [R'''layer.\d+.''' + args.quant_enable_layer_module] , _disabled=lowerCAmelCase_ )
if args.recalibrate_weights:
recalibrate_weights(lowerCAmelCase_ )
if args.fuse_qkv:
fuse_qkv(lowerCAmelCase_ , lowerCAmelCase_ )
if args.clip_gelu:
clip_gelu(lowerCAmelCase_ , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(lowerCAmelCase_ )
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
logger.info('''Enabling Calibration''' )
for name, module in model.named_modules():
if name.endswith('''_quantizer''' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(f'''{name:80}: {module}''' )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
logger.info('''Loading calibrated amax''' )
for name, module in model.named_modules():
if name.endswith('''_quantizer''' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('''percentile''' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(lowerCAmelCase_ )
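# Typical calibration flow implied by the two helpers above (hedged, commented sketch;
# `enable_calibration`/`finish_calibration` are assumed public names):
#   enable_calibration(model)
#   for batch in calib_dataloader:   # hypothetical small calibration dataloader
#       model(**batch)               # forward passes let the calibrators collect ranges
#   finish_calibration(model, args)  # loads the computed amax values, re-enables quant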
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
def fusea(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
for mod in [qq, qk, qv]:
if not hasattr(lowerCAmelCase_ , '''_amax''' ):
print(''' WARNING: NO AMAX BUFFER''' )
return
_snake_case : Tuple = qq._amax.detach().item()
_snake_case : Tuple = qk._amax.detach().item()
_snake_case : List[Any] = qv._amax.detach().item()
_snake_case : List[str] = max(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
qq._amax.fill_(lowerCAmelCase_ )
qk._amax.fill_(lowerCAmelCase_ )
qv._amax.fill_(lowerCAmelCase_ )
logger.info(f''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' )
for name, mod in model.named_modules():
if name.endswith('''.attention.self''' ):
logger.info(f'''FUSE_QKV: {name:{name_width}}''' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
for name, mod in model.named_modules():
if name.endswith('''.output.dense''' ) and not name.endswith('''attention.output.dense''' ):
_snake_case : List[Any] = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=lowerCAmelCase_ )
_snake_case : List[str] = mod._input_quantizer._amax.data.detach().item()
logger.info(f'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' )
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ) and mod._weight_quantizer.axis is not None:
_snake_case : Dict = mod.weight.shape[0]
_snake_case : Optional[int] = mod._weight_quantizer._amax.detach()
_snake_case : Optional[int] = torch.ones(lowerCAmelCase_ , dtype=amax.dtype , device=amax.device ) * amax
print(f'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' )
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ):
if not hasattr(mod.weight_quantizer , '''_amax''' ):
                print(f'''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''' )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
_snake_case : int = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
_snake_case : Dict = set(range(len(mod.weight.size() ) ) ) - axis_set
_snake_case : Optional[int] = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowerCAmelCase_ , keepdims=lowerCAmelCase_ ).detach()
logger.info(f'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' )
_snake_case : Tuple = amax
def _a ( lowerCAmelCase_ , lowerCAmelCase_=25 , lowerCAmelCase_=180 , lowerCAmelCase_=None ):
"""simple docstring"""
if ignore is None:
_snake_case : Dict = []
elif not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case : Optional[int] = [ignore]
_snake_case : str = 0
for name, mod in model.named_modules():
if not hasattr(lowerCAmelCase_ , '''weight''' ):
continue
_snake_case : Optional[int] = max(lowerCAmelCase_ , len(lowerCAmelCase_ ) )
for name, mod in model.named_modules():
_snake_case : Optional[Any] = getattr(lowerCAmelCase_ , '''_input_quantizer''' , lowerCAmelCase_ )
_snake_case : Tuple = getattr(lowerCAmelCase_ , '''_weight_quantizer''' , lowerCAmelCase_ )
if not hasattr(lowerCAmelCase_ , '''weight''' ):
continue
if type(lowerCAmelCase_ ) in ignore:
continue
if [True for s in ignore if type(lowerCAmelCase_ ) is str and s in name]:
continue
_snake_case : Optional[int] = f'''Act:{input_q.extra_repr()}'''
_snake_case : Any = f'''Wgt:{weight_q.extra_repr()}'''
_snake_case : Optional[int] = f'''{name:{name_width}} {act_str} {wgt_str}'''
if len(lowerCAmelCase_ ) <= line_width:
logger.info(lowerCAmelCase_ )
else:
logger.info(f'''{name:{name_width}} {act_str}''' )
logger.info(f'''{" ":{name_width}} {wgt_str}''' )
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : str = 0
for name, mod in model.named_modules():
if isinstance(lowerCAmelCase_ , pytorch_quantization.nn.TensorQuantizer ):
print(f'''{name:80} {mod}''' )
count += 1
print(f'''{count} TensorQuantizers found in model''' )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Optional[Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if quantizer_mod is not None:
assert hasattr(lowerCAmelCase_ , lowerCAmelCase_ )
setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
logger.warning(f'''{name} has no {quantizer}''' )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="both" , **lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Optional[Any] = f'''Warning: changing {which} quantizers of {name:{qname_width}}'''
for k, v in kwargs.items():
s += f''' {k}={v}'''
if which in ["input", "both"]:
set_quantizer(lowerCAmelCase_ , lowerCAmelCase_ , '''_input_quantizer''' , lowerCAmelCase_ , lowerCAmelCase_ )
if which in ["weight", "both"]:
set_quantizer(lowerCAmelCase_ , lowerCAmelCase_ , '''_weight_quantizer''' , lowerCAmelCase_ , lowerCAmelCase_ )
logger.info(lowerCAmelCase_ )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ):
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(lowerCAmelCase_ , '''_input_quantizer''' ) or hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ):
for n in names:
if re.search(lowerCAmelCase_ , lowerCAmelCase_ ):
set_quantizers(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
elif name.endswith('''_quantizer''' ):
for n in names:
if re.search(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case : Any = f'''Warning: changing {name:{name_width}}'''
for k, v in kwargs.items():
s += f''' {k}={v}'''
setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
logger.info(lowerCAmelCase_ )
| 47
| 0
|
'''simple docstring'''
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
UpperCAmelCase : Optional[Any] = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
UpperCAmelCase : Optional[Any] = direct_transformers_import(PATH_TO_TRANSFORMERS)
UpperCAmelCase : List[str] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
UpperCAmelCase : Optional[int] = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
# used during training (despite we don't have training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Dict = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f'''config.{attribute}''' in modeling_source
or f'''getattr(config, "{attribute}"''' in modeling_source
or f'''getattr(self.config, "{attribute}"''' in modeling_source
):
_snake_case : int = True
# Deal with multi-line cases
elif (
re.search(
Rf'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''' , lowerCAmelCase_ , )
is not None
):
_snake_case : str = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
_snake_case : List[str] = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
_snake_case : Optional[Any] = [
'''bos_index''',
'''eos_index''',
'''pad_index''',
'''unk_index''',
'''mask_index''',
'''image_size''',
'''use_cache''',
'''out_features''',
'''out_indices''',
]
_snake_case : str = ['''encoder_no_repeat_ngram_size''']
# Special cases to be allowed
_snake_case : List[Any] = True
if not attribute_used:
_snake_case : List[Any] = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
_snake_case : Dict = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
_snake_case : Union[str, Any] = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
_snake_case : Tuple = True
elif attribute.endswith('''_token_id''' ):
_snake_case : Any = True
# configuration class specific cases
if not case_allowed:
_snake_case : Union[str, Any] = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
_snake_case : Union[str, Any] = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
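# Tiny self-check of the multi-line `getattr` pattern used above (my addition): the
# whitespace class [ \t\v\n\r\f] lets the match span a line break between `getattr(`
# and `self.config`.
if __name__ == "__main__":
    _example_source = 'x = getattr(\n    self.config, "hidden_size", 32)'
    _pattern = r'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"hidden_size"'
    assert re.search(_pattern, _example_source) is not None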
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Any = dict(inspect.signature(config_class.__init__ ).parameters )
_snake_case : Dict = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']]
_snake_case : Optional[int] = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
_snake_case : List[str] = {}
if len(config_class.attribute_map ) > 0:
_snake_case : Optional[Any] = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
_snake_case : Union[str, Any] = inspect.getsourcefile(lowerCAmelCase_ )
_snake_case : Union[str, Any] = os.path.dirname(lowerCAmelCase_ )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
_snake_case : str = [os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) for fn in os.listdir(lowerCAmelCase_ ) if fn.startswith('''modeling_''' )]
# Get the source code strings
_snake_case : Optional[Any] = []
for path in modeling_paths:
if os.path.isfile(lowerCAmelCase_ ):
with open(lowerCAmelCase_ ) as fp:
modeling_sources.append(fp.read() )
_snake_case : Dict = []
for config_param, default_value in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
# `attributes` here is all the variant names for `config_param`
_snake_case : Any = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
unused_attributes.append(attributes[0] )
return sorted(lowerCAmelCase_ )
def _a ( ):
"""simple docstring"""
_snake_case : Optional[int] = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
_snake_case : Tuple = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda lowerCAmelCase_ : inspect.isclass(lowerCAmelCase_ )
and issubclass(lowerCAmelCase_ , lowerCAmelCase_ )
and inspect.getmodule(lowerCAmelCase_ ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
_snake_case : Dict = check_config_attributes_being_used(lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 0:
_snake_case : str = unused_attributes
if len(lowerCAmelCase_ ) > 0:
_snake_case : Optional[Any] = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n'''
for name, attributes in configs_with_unused_attributes.items():
error += f'''{name}: {attributes}\n'''
raise ValueError(lowerCAmelCase_ )
if __name__ == "__main__":
check_config_attributes()
| 706
|
'''simple docstring'''
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """simple docstring"""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[mid], sequence[end] = sequence[end], sequence[mid]
    slowsort(sequence, start, end - 1)
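# Quick demonstration (my addition): slowsort orders the sequence in place.
if __name__ == "__main__":
    _demo = [5, 3, 8, 1, 4]
    slowsort(_demo)
    print(_demo)  # [1, 3, 4, 5, 8]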
if __name__ == "__main__":
from doctest import testmod
testmod()
| 47
| 0
|
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase : List[str] = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
UpperCAmelCase : Any = 5_0_0_0_3
UpperCAmelCase : Optional[Any] = 5_0_0_0_2
@require_sentencepiece
@require_tokenizers
class lowerCamelCase (a__ , unittest.TestCase ):
_lowercase : Any = PLBartTokenizer
_lowercase : Optional[Any] = None
_lowercase : List[str] = False
def UpperCAmelCase_ ( self ) -> str:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_snake_case : Dict = PLBartTokenizer(lowercase__ , language_codes='''base''' , keep_accents=lowercase__ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_snake_case : Optional[int] = PLBartTokenizer(lowercase__ , language_codes='''base''' , keep_accents=lowercase__ )
_snake_case : int = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowercase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_snake_case : Tuple = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowercase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_snake_case : Optional[int] = tokenizer.convert_tokens_to_ids(lowercase__ )
self.assertListEqual(
lowercase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_snake_case : Optional[Any] = tokenizer.convert_ids_to_tokens(lowercase__ )
self.assertListEqual(
lowercase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
_snake_case : Any = tokenizer.vocab_size
_snake_case : int = [tokenizer.convert_ids_to_tokens(lowercase__ ) for x in range(end - 4 , lowercase__ )]
self.assertListEqual(lowercase__ , ['''__java__''', '''__python__''', '''__en_XX__''', '''<mask>'''] )
_snake_case : Tuple = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
_snake_case : Dict = tokenizer(lowercase__ ).input_ids
self.assertEqual(
tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ , clean_up_tokenization_spaces=lowercase__ ) , lowercase__ , )
def UpperCAmelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_snake_case : Union[str, Any] = PLBartTokenizer(lowercase__ , language_codes='''multi''' , keep_accents=lowercase__ )
_snake_case : Dict = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowercase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_snake_case : Dict = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowercase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_snake_case : List[Any] = tokenizer.convert_tokens_to_ids(lowercase__ )
self.assertListEqual(
lowercase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_snake_case : Any = tokenizer.convert_ids_to_tokens(lowercase__ )
self.assertListEqual(
lowercase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
_snake_case : List[Any] = tokenizer.vocab_size
_snake_case : Any = [tokenizer.convert_ids_to_tokens(lowercase__ ) for x in range(end - 7 , lowercase__ )]
self.assertListEqual(
lowercase__ , ['''__java__''', '''__python__''', '''__en_XX__''', '''__javascript__''', '''__php__''', '''__ruby__''', '''__go__'''] )
_snake_case : Optional[int] = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
_snake_case : str = tokenizer(lowercase__ ).input_ids
self.assertEqual(
tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ , clean_up_tokenization_spaces=lowercase__ ) , lowercase__ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase (unittest.TestCase ):
_lowercase : Union[str, Any] = """uclanlp/plbart-python-en_XX"""
_lowercase : Optional[int] = [
"""def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])""",
"""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""",
]
_lowercase : List[Any] = [
"""Returns the maximum value of a b c.""",
"""Sums the values of a b c.""",
]
_lowercase : List[Any] = [
134,
5_452,
33_460,
33_441,
33_463,
33_465,
33_463,
33_449,
988,
20,
33_456,
19,
33_456,
771,
39,
4_258,
889,
3_318,
33_441,
33_463,
33_465,
33_463,
33_449,
2_471,
2,
PYTHON_CODE,
]
@classmethod
def UpperCAmelCase_ ( cls ) -> List[str]:
"""simple docstring"""
_snake_case : PLBartTokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes='''base''' , src_lang='''python''' , tgt_lang='''en_XX''' )
_snake_case : Optional[Any] = 1
return cls
def UpperCAmelCase_ ( self ) -> Dict:
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__java__'''] , 50_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__python__'''] , 50_002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__en_XX__'''] , 50_003 )
def UpperCAmelCase_ ( self ) -> int:
"""simple docstring"""
_snake_case : List[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowercase__ )
def UpperCAmelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
self.assertIn(lowercase__ , self.tokenizer.all_special_ids )
_snake_case : Optional[Any] = [EN_CODE, 9_037, 33_442, 57, 752, 153, 14, 56, 18, 9, 2]
_snake_case : Any = self.tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ )
_snake_case : Tuple = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
self.assertNotIn(self.tokenizer.eos_token , lowercase__ )
def UpperCAmelCase_ ( self ) -> Dict:
"""simple docstring"""
_snake_case : Tuple = ['''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''' * 20]
self.assertIsInstance(src_text[0] , lowercase__ )
_snake_case : int = 10
_snake_case : int = self.tokenizer(lowercase__ , max_length=lowercase__ , truncation=lowercase__ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowercase__ )
self.assertEqual(len(lowercase__ ) , lowercase__ )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''__java__'''] ) , [50_004, 50_001] )
def UpperCAmelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_snake_case : Optional[int] = tempfile.mkdtemp()
_snake_case : Optional[Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowercase__ )
_snake_case : List[Any] = PLBartTokenizer.from_pretrained(lowercase__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowercase__ )
@require_torch
def UpperCAmelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_snake_case : List[str] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowercase__ , return_tensors='''pt''' )
_snake_case : Any = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , lowercase__ )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def UpperCAmelCase_ ( self ) -> Any:
"""simple docstring"""
_snake_case : List[str] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowercase__ , truncation=lowercase__ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
_snake_case : List[str] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
_snake_case : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowercase__ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def UpperCAmelCase_ ( self ) -> str:
"""simple docstring"""
_snake_case : Dict = self.tokenizer(self.src_text , padding=lowercase__ , truncation=lowercase__ , max_length=3 , return_tensors='''pt''' )
_snake_case : Tuple = self.tokenizer(
text_target=self.tgt_text , padding=lowercase__ , truncation=lowercase__ , max_length=10 , return_tensors='''pt''' )
_snake_case : Optional[int] = targets['''input_ids''']
_snake_case : Optional[Any] = shift_tokens_right(lowercase__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCAmelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_snake_case : int = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''java''' )
self.assertEqual(
nested_simplify(lowercase__ ) , {
# A, test, EOS, en_XX
'''input_ids''': [[150, 242, 2, 50_003]],
'''attention_mask''': [[1, 1, 1, 1]],
# java
'''forced_bos_token_id''': 50_001,
} , )
| 707
|
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase (unittest.TestCase ):
@slow
def UpperCAmelCase_ ( self ) -> int:
"""simple docstring"""
_snake_case : Tuple = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
_snake_case : Any = AutoTokenizer.from_pretrained('''google/mt5-small''' )
_snake_case : List[str] = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
_snake_case : Dict = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
_snake_case : Any = shift_tokens_right(lowercase__ , model.config.pad_token_id , model.config.decoder_start_token_id )
_snake_case : Any = model(lowercase__ , decoder_input_ids=lowercase__ ).logits
_snake_case : Tuple = optax.softmax_cross_entropy(lowercase__ , onehot(lowercase__ , logits.shape[-1] ) ).mean()
_snake_case : Tuple = -(labels.shape[-1] * loss.item())
_snake_case : Union[str, Any] = -84.9_127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 47
| 0
|
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : List[Any] = 0
if start < end:
_snake_case : List[Any] = randint(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case : Any = a[end]
_snake_case : List[str] = a[pivot]
_snake_case : Optional[int] = temp
        _snake_case , _snake_case : List[Any] = _in_place_partition(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
count += _in_place_quick_sort(lowerCAmelCase_ , lowerCAmelCase_ , p - 1 )
count += _in_place_quick_sort(lowerCAmelCase_ , p + 1 , lowerCAmelCase_ )
return count
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Optional[Any] = 0
_snake_case : Optional[int] = randint(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case : Tuple = a[end]
_snake_case : Optional[Any] = a[pivot]
_snake_case : Union[str, Any] = temp
_snake_case : Union[str, Any] = start - 1
for index in range(lowerCAmelCase_ , lowerCAmelCase_ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
_snake_case : Optional[int] = new_pivot_index + 1
_snake_case : Optional[Any] = a[new_pivot_index]
_snake_case : Tuple = a[index]
_snake_case : str = temp
_snake_case : Any = a[new_pivot_index + 1]
_snake_case : str = a[end]
_snake_case : Optional[int] = temp
return new_pivot_index + 1, count
UpperCAmelCase : Dict = TemporaryFile()
UpperCAmelCase : Dict = 1_0_0 # 100 elements are to be sorted
UpperCAmelCase, UpperCAmelCase : str = 0, 1 # mean and standard deviation
UpperCAmelCase : Optional[Any] = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)
outfile.seek(0) # using the same array
UpperCAmelCase : int = np.load(outfile)
UpperCAmelCase : Optional[int] = len(M) - 1
UpperCAmelCase : str = _in_place_quick_sort(M, 0, r)
print(
    'No of Comparisons for 100 elements selected from a standard normal distribution '
'is :'
)
print(z)
| 708
|
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCamelCase (unittest.TestCase ):
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_snake_case : Any = torch.nn.Linear(10 , 10 )
_snake_case : Optional[int] = torch.optim.SGD(model.parameters() , 0.1 )
_snake_case : List[str] = Accelerator()
_snake_case : Optional[Any] = accelerator.prepare(lowercase__ )
try:
pickle.loads(pickle.dumps(lowercase__ ) )
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
| 47
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
UpperCAmelCase : Any = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[Any] = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 709
|
'''simple docstring'''
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    """simple docstring"""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    """simple docstring"""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    """simple docstring"""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    """simple docstring"""
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
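# Examples (my addition): three points on the main diagonal are collinear; perturbing
# the z-coordinate of the last point makes the cross product non-zero.
assert are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2))
assert not are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 3))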
| 47
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase : Any = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : int = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Dict = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
UpperCAmelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 710
|
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
UpperCAmelCase : List[str] = logging.getLogger(__name__)
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
if os.path.exists(lowerCAmelCase_ ):
if os.path.exists(os.path.join(lowerCAmelCase_ , '''config.json''' ) ) and os.path.isfile(
os.path.join(lowerCAmelCase_ , '''config.json''' ) ):
os.remove(os.path.join(lowerCAmelCase_ , '''config.json''' ) )
if os.path.exists(os.path.join(lowerCAmelCase_ , '''pytorch_model.bin''' ) ) and os.path.isfile(
os.path.join(lowerCAmelCase_ , '''pytorch_model.bin''' ) ):
os.remove(os.path.join(lowerCAmelCase_ , '''pytorch_model.bin''' ) )
else:
os.makedirs(lowerCAmelCase_ )
model.save_pretrained(lowerCAmelCase_ )
def _a ( lowerCAmelCase_ , lowerCAmelCase_=False ):
"""simple docstring"""
_snake_case : Optional[Any] = 2
if unlogit:
_snake_case : Any = torch.pow(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case : Union[str, Any] = p * torch.log(lowerCAmelCase_ )
_snake_case : Optional[Any] = 0
return -plogp.sum(dim=-1 )
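# Quick standalone check (my addition; `entropy` is the name the call site further
# down uses for this helper): a uniform distribution over four outcomes has entropy
# ln(4) ≈ 1.3863.
if __name__ == "__main__":
    print(entropy(torch.full((4,), 0.25)))  # tensor(1.3863)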
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
logger.info('''lv, h >\t''' + '''\t'''.join(f'''{x + 1}''' for x in range(len(lowerCAmelCase_ ) ) ) )
for row in range(len(lowerCAmelCase_ ) ):
if tensor.dtype != torch.long:
logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) )
else:
logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:d}''' for x in tensor[row].cpu().data ) )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=None , lowerCAmelCase_=False ):
"""simple docstring"""
_snake_case , _snake_case : Optional[int] = model.config.num_hidden_layers, model.config.num_attention_heads
_snake_case : Tuple = torch.zeros(lowerCAmelCase_ , lowerCAmelCase_ ).to(args.device )
_snake_case : Union[str, Any] = torch.zeros(lowerCAmelCase_ , lowerCAmelCase_ ).to(args.device )
if head_mask is None:
_snake_case : int = torch.ones(lowerCAmelCase_ , lowerCAmelCase_ ).to(args.device )
head_mask.requires_grad_(requires_grad=lowerCAmelCase_ )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
_snake_case : Dict = None
_snake_case : Dict = 0.0
_snake_case : Optional[int] = 0.0
for step, inputs in enumerate(tqdm(lowerCAmelCase_ , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ):
_snake_case : List[Any] = tuple(t.to(args.device ) for t in inputs )
((_snake_case) , ) : Optional[Any] = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
_snake_case : Any = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , head_mask=lowerCAmelCase_ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
_snake_case , _snake_case , _snake_case : List[Any] = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(lowerCAmelCase_ ):
_snake_case : Union[str, Any] = entropy(attn.detach() , lowerCAmelCase_ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(lowerCAmelCase_ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
_snake_case : Any = 2
_snake_case : List[str] = torch.pow(torch.pow(lowerCAmelCase_ , lowerCAmelCase_ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
if not args.dont_normalize_global_importance:
_snake_case : Optional[int] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('''Attention entropies''' )
print_ad_tensor(lowerCAmelCase_ )
if compute_importance:
logger.info('''Head importance scores''' )
print_ad_tensor(lowerCAmelCase_ )
logger.info('''Head ranked by importance scores''' )
_snake_case : str = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
_snake_case : List[Any] = torch.arange(
head_importance.numel() , device=args.device )
_snake_case : List[Any] = head_ranks.view_as(lowerCAmelCase_ )
print_ad_tensor(lowerCAmelCase_ )
return attn_entropy, head_importance, total_loss
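# Call sketch (the name `compute_heads_importance` is taken from the call sites below;
# the third argument is the eval DataLoader this script builds in its main):
#   attn_entropy, head_importance, total_loss = compute_heads_importance(args, model, eval_dataloader)
# `head_importance` is an (n_layers, n_heads) tensor of accumulated |d loss / d head_mask|,
# normalized per layer and globally unless the corresponding --dont_normalize_* flags are set.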
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case , _snake_case , _snake_case : str = compute_heads_importance(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ )
_snake_case : Optional[Any] = 1 / loss # instead of downsteam score use the LM loss
logger.info('''Pruning: original score: %f, threshold: %f''' , lowerCAmelCase_ , original_score * args.masking_threshold )
_snake_case : int = torch.ones_like(lowerCAmelCase_ )
_snake_case : Optional[Any] = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
_snake_case : int = original_score
while current_score >= original_score * args.masking_threshold:
_snake_case : int = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
_snake_case : Dict = float('''Inf''' )
_snake_case : Optional[Any] = head_importance.view(-1 ).sort()[1]
if len(lowerCAmelCase_ ) <= num_to_mask:
print('''BREAK BY num_to_mask''' )
break
# mask heads
_snake_case : Union[str, Any] = current_heads_to_mask[:num_to_mask]
logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) )
_snake_case : Tuple = new_head_mask.view(-1 )
_snake_case : List[str] = 0.0
_snake_case : str = new_head_mask.view_as(lowerCAmelCase_ )
_snake_case : Dict = new_head_mask.clone().detach()
print_ad_tensor(lowerCAmelCase_ )
# Compute metric and head importance again
_snake_case , _snake_case , _snake_case : Any = compute_heads_importance(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ , head_mask=lowerCAmelCase_ )
_snake_case : int = 1 / loss
logger.info(
        '''Masking: current score: %f, remaining heads %d (%.1f percent)''' , lowerCAmelCase_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info('''Final head mask''' )
print_ad_tensor(lowerCAmelCase_ )
np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() )
return head_mask
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Optional[Any] = datetime.now()
_snake_case , _snake_case , _snake_case : Union[str, Any] = compute_heads_importance(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ , compute_importance=lowerCAmelCase_ , head_mask=lowerCAmelCase_ )
_snake_case : Tuple = 1 / loss
_snake_case : Dict = datetime.now() - before_time
_snake_case : List[Any] = sum(p.numel() for p in model.parameters() )
_snake_case : int = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(lowerCAmelCase_ ) )
}
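    # heads_to_prune maps layer index -> list of head indices with a zero mask, the format expected by model.prune_heads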
for k, v in heads_to_prune.items():
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case : Union[str, Any] = [
v,
]
assert sum(len(lowerCAmelCase_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(lowerCAmelCase_ )
_snake_case : List[str] = sum(p.numel() for p in model.parameters() )
_snake_case : int = datetime.now()
_snake_case , _snake_case , _snake_case : Optional[Any] = compute_heads_importance(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ , compute_importance=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , actually_pruned=lowerCAmelCase_ , )
_snake_case : Optional[int] = 1 / loss
_snake_case : Dict = datetime.now() - before_time
logger.info(
        '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percent)''' , lowerCAmelCase_ , lowerCAmelCase_ , pruned_num_params / original_num_params * 100 , )
logger.info('''Pruning: score with masking: %f score with pruning: %f''' , lowerCAmelCase_ , lowerCAmelCase_ )
    logger.info('''Pruning: speed ratio (original timing / new timing): %f percent''' , original_time / new_time * 100 )
save_model(lowerCAmelCase_ , args.output_dir )
def _a ( ):
"""simple docstring"""
_snake_case : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--data_dir''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--output_dir''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''The output directory where the model predictions and checkpoints will be written.''' , )
# Other parameters
parser.add_argument(
'''--config_name''' , default='''''' , type=lowerCAmelCase_ , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--tokenizer_name''' , default='''''' , type=lowerCAmelCase_ , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--cache_dir''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
parser.add_argument(
'''--data_subset''' , type=lowerCAmelCase_ , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' )
parser.add_argument(
'''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
parser.add_argument(
        '''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance scores by layer''' )
parser.add_argument(
'''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
parser.add_argument(
'''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' )
parser.add_argument(
        '''--masking_threshold''' , default=0.9 , type=lowerCAmelCase_ , help='''masking threshold in terms of metrics (stop masking when metric < threshold * original metric value).''' , )
parser.add_argument(
        '''--masking_amount''' , default=0.1 , type=lowerCAmelCase_ , help='''Fraction of heads to mask at each masking step.''' )
parser.add_argument('''--metric_name''' , default='''acc''' , type=lowerCAmelCase_ , help='''Metric to use for head masking.''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=lowerCAmelCase_ , help=(
'''The maximum total input sequence length after WordPiece tokenization. \n'''
'''Sequences longer than this will be truncated, sequences shorter padded.'''
) , )
parser.add_argument('''--batch_size''' , default=1 , type=lowerCAmelCase_ , help='''Batch size.''' )
parser.add_argument('''--seed''' , type=lowerCAmelCase_ , default=42 )
parser.add_argument('''--local_rank''' , type=lowerCAmelCase_ , default=-1 , help='''local_rank for distributed training on gpus''' )
parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' )
parser.add_argument('''--server_ip''' , type=lowerCAmelCase_ , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=lowerCAmelCase_ , default='''''' , help='''Can be used for distant debugging.''' )
_snake_case : Optional[Any] = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCAmelCase_ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
_snake_case : str = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
_snake_case : Optional[Any] = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
_snake_case : List[str] = torch.device('''cuda''' , args.local_rank )
_snake_case : int = 1
torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
_snake_case : Optional[Any] = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
_snake_case : Optional[int] = nn.parallel.DistributedDataParallel(
lowerCAmelCase_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=lowerCAmelCase_ )
elif args.n_gpu > 1:
_snake_case : List[Any] = nn.DataParallel(lowerCAmelCase_ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=lowerCAmelCase_ )
torch.save(lowerCAmelCase_ , os.path.join(args.output_dir , '''run_args.bin''' ) )
logger.info('''Training/evaluation parameters %s''' , lowerCAmelCase_ )
# Prepare dataset
_snake_case : Dict = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
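    # args.data_dir here is expected to be a plain-text file of token ids loaded with np.loadtxt; each row becomes one training sequence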
_snake_case : int = (torch.from_numpy(lowerCAmelCase_ ),)
_snake_case : Tuple = TensorDataset(*lowerCAmelCase_ )
_snake_case : List[str] = RandomSampler(lowerCAmelCase_ )
_snake_case : Dict = DataLoader(lowerCAmelCase_ , sampler=lowerCAmelCase_ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
_snake_case : Optional[int] = mask_heads(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
prune_heads(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if __name__ == "__main__":
main()
| 47
| 0
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCAmelCase : Tuple = 1_6
UpperCAmelCase : Dict = 3_2
def _a ( lowerCAmelCase_ , lowerCAmelCase_ = 16 ):
"""simple docstring"""
_snake_case : Optional[Any] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
_snake_case : Any = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(lowerCAmelCase_ ):
# max_length=None => use the model max length (it's actually the default)
_snake_case : Optional[int] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_snake_case : Optional[Any] = datasets.map(
lowerCAmelCase_ , batched=lowerCAmelCase_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_snake_case : Any = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(lowerCAmelCase_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_snake_case : Optional[Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_snake_case : Tuple = 16
elif accelerator.mixed_precision != "no":
_snake_case : Any = 8
else:
_snake_case : Dict = None
return tokenizer.pad(
lowerCAmelCase_ , padding='''longest''' , max_length=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_tensors='''pt''' , )
# Instantiate dataloaders.
_snake_case : List[Any] = DataLoader(
tokenized_datasets['''train'''] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ )
_snake_case : str = DataLoader(
tokenized_datasets['''validation'''] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCAmelCase : Dict = mocked_dataloaders # noqa: F811
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , lowerCAmelCase_ ) == "1":
_snake_case : Optional[Any] = 2
# New Code #
_snake_case : List[str] = int(args.gradient_accumulation_steps )
# Initialize accelerator
_snake_case : List[str] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowerCAmelCase_ )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
'''Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`''' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_snake_case : Tuple = config['''lr''']
_snake_case : Optional[int] = int(config['''num_epochs'''] )
_snake_case : Dict = int(config['''seed'''] )
_snake_case : Optional[Any] = int(config['''batch_size'''] )
_snake_case : List[str] = evaluate.load('''glue''' , '''mrpc''' )
set_seed(lowerCAmelCase_ )
_snake_case : Any = get_dataloaders(lowerCAmelCase_ , lowerCAmelCase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_snake_case : Tuple = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=lowerCAmelCase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_snake_case : Optional[Any] = model.to(accelerator.device )
# Instantiate optimizer
_snake_case : Any = AdamW(params=model.parameters() , lr=lowerCAmelCase_ )
# Instantiate scheduler
_snake_case : List[Any] = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase_ , num_warmup_steps=100 , num_training_steps=(len(lowerCAmelCase_ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_snake_case : Tuple = accelerator.prepare(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Now we train the model
for epoch in range(lowerCAmelCase_ ):
model.train()
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(lowerCAmelCase_ ):
_snake_case : Union[str, Any] = model(**lowerCAmelCase_ )
_snake_case : Optional[Any] = output.loss
accelerator.backward(lowerCAmelCase_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
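                # inside accumulate(), accelerate skips gradient sync and only performs a real optimizer step every gradient_accumulation_steps micro-batches; intermediate calls are no-ops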
model.eval()
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_snake_case : Union[str, Any] = model(**lowerCAmelCase_ )
_snake_case : List[Any] = outputs.logits.argmax(dim=-1 )
_snake_case : str = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=lowerCAmelCase_ , references=lowerCAmelCase_ , )
_snake_case : Tuple = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , lowerCAmelCase_ )
def _a ( ):
"""simple docstring"""
_snake_case : int = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
        '''--mixed_precision''' , type=lowerCAmelCase_ , default=lowerCAmelCase_ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        ''' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10'''
        ''' and an Nvidia Ampere GPU.''' , )
# New Code #
parser.add_argument(
        '''--gradient_accumulation_steps''' , type=lowerCAmelCase_ , default=1 , help='''The number of minibatches over which to accumulate gradients before stepping the optimizer.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
_snake_case : List[Any] = parser.parse_args()
_snake_case : Union[str, Any] = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(lowerCAmelCase_ , lowerCAmelCase_ )
if __name__ == "__main__":
main()
| 711
|
'''simple docstring'''
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
if n == 1 or not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return 0
elif n == 2:
return 1
else:
_snake_case : Union[str, Any] = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Optional[int] = 0
_snake_case : int = 2
while digits < n:
index += 1
_snake_case : Tuple = len(str(fibonacci(lowerCAmelCase_ ) ) )
return index
def _a ( lowerCAmelCase_ = 1_000 ):
"""simple docstring"""
return fibonacci_digits_index(lowerCAmelCase_ )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 47
| 0
|
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCAmelCase : Optional[int] = 1_6
UpperCAmelCase : Any = 3_2
def _a ( lowerCAmelCase_ , lowerCAmelCase_ = 16 ):
"""simple docstring"""
_snake_case : str = AutoTokenizer.from_pretrained('''bert-base-cased''' )
_snake_case : Tuple = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(lowerCAmelCase_ ):
# max_length=None => use the model max length (it's actually the default)
_snake_case : Union[str, Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_snake_case : Optional[Any] = datasets.map(
lowerCAmelCase_ , batched=lowerCAmelCase_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_snake_case : int = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(lowerCAmelCase_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_snake_case : int = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_snake_case : List[str] = 16
elif accelerator.mixed_precision != "no":
_snake_case : List[Any] = 8
else:
_snake_case : List[Any] = None
return tokenizer.pad(
lowerCAmelCase_ , padding='''longest''' , max_length=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_tensors='''pt''' , )
# Instantiate dataloaders.
_snake_case : List[Any] = DataLoader(
tokenized_datasets['''train'''] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ , drop_last=lowerCAmelCase_ )
_snake_case : str = DataLoader(
tokenized_datasets['''validation'''] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ , drop_last=(accelerator.mixed_precision == '''fp8''') , )
return train_dataloader, eval_dataloader
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Optional[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_snake_case : Dict = config['''lr''']
_snake_case : Tuple = int(config['''num_epochs'''] )
_snake_case : List[str] = int(config['''seed'''] )
_snake_case : Tuple = int(config['''batch_size'''] )
_snake_case : Optional[Any] = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
_snake_case : int = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_snake_case : Tuple = batch_size // MAX_GPU_BATCH_SIZE
_snake_case : Dict = MAX_GPU_BATCH_SIZE
set_seed(lowerCAmelCase_ )
_snake_case : int = get_dataloaders(lowerCAmelCase_ , lowerCAmelCase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_snake_case : Any = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=lowerCAmelCase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_snake_case : List[str] = model.to(accelerator.device )
# Instantiate optimizer
_snake_case : int = AdamW(params=model.parameters() , lr=lowerCAmelCase_ )
# Instantiate scheduler
_snake_case : Optional[Any] = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase_ , num_warmup_steps=100 , num_training_steps=(len(lowerCAmelCase_ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_snake_case : List[str] = accelerator.prepare(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Now we train the model
for epoch in range(lowerCAmelCase_ ):
model.train()
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_snake_case : Union[str, Any] = model(**lowerCAmelCase_ )
_snake_case : Dict = outputs.loss
_snake_case : Optional[Any] = loss / gradient_accumulation_steps
accelerator.backward(lowerCAmelCase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_snake_case : int = model(**lowerCAmelCase_ )
_snake_case : List[Any] = outputs.logits.argmax(dim=-1 )
_snake_case : List[str] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=lowerCAmelCase_ , references=lowerCAmelCase_ , )
_snake_case : Tuple = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , lowerCAmelCase_ )
def _a ( ):
"""simple docstring"""
_snake_case : Optional[int] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
        '''--mixed_precision''' , type=lowerCAmelCase_ , default=lowerCAmelCase_ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        ''' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10'''
        ''' and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
_snake_case : str = parser.parse_args()
_snake_case : Dict = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(lowerCAmelCase_ , lowerCAmelCase_ )
if __name__ == "__main__":
main()
| 712
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
UpperCAmelCase : Any = TypeVar('T')
UpperCAmelCase : str = TypeVar('U')
class lowerCamelCase (Generic[T, U] ):
def __init__( self , lowercase__ , lowercase__ ) -> List[Any]:
"""simple docstring"""
_snake_case : str = key
_snake_case : Optional[int] = val
_snake_case : DoubleLinkedListNode[T, U] | None = None
_snake_case : DoubleLinkedListNode[T, U] | None = None
def __repr__( self ) -> str:
"""simple docstring"""
return (
F'''Node: key: {self.key}, val: {self.val}, '''
F'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
)
class lowerCamelCase (Generic[T, U] ):
def __init__( self ) -> None:
"""simple docstring"""
_snake_case : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowercase__ , lowercase__ )
_snake_case : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowercase__ , lowercase__ )
_snake_case , _snake_case : Union[str, Any] = self.rear, self.head
def __repr__( self ) -> str:
"""simple docstring"""
_snake_case : List[Any] = ['''DoubleLinkedList''']
_snake_case : str = self.head
while node.next is not None:
rep.append(str(lowercase__ ) )
_snake_case : List[str] = node.next
rep.append(str(self.rear ) )
return ",\n ".join(lowercase__ )
def UpperCAmelCase_ ( self , lowercase__ ) -> None:
"""simple docstring"""
_snake_case : Tuple = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
_snake_case : Union[str, Any] = node
_snake_case : Optional[Any] = previous
_snake_case : int = node
_snake_case : Union[str, Any] = self.rear
def UpperCAmelCase_ ( self , lowercase__ ) -> DoubleLinkedListNode[T, U] | None:
"""simple docstring"""
if node.prev is None or node.next is None:
return None
_snake_case : Optional[int] = node.next
_snake_case : Any = node.prev
_snake_case : List[str] = None
_snake_case : Optional[int] = None
return node
class lowerCamelCase (Generic[T, U] ):
_lowercase : dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__( self , lowercase__ ) -> Union[str, Any]:
"""simple docstring"""
_snake_case : DoubleLinkedList[T, U] = DoubleLinkedList()
_snake_case : Union[str, Any] = capacity
_snake_case : int = 0
_snake_case : Dict = 0
_snake_case : Union[str, Any] = 0
_snake_case : dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self ) -> str:
"""simple docstring"""
return (
F'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
F'''capacity={self.capacity}, current size={self.num_keys})'''
)
def __contains__( self , lowercase__ ) -> bool:
"""simple docstring"""
return key in self.cache
def UpperCAmelCase_ ( self , lowercase__ ) -> U | None:
"""simple docstring"""
if key in self.cache:
self.hits += 1
_snake_case : DoubleLinkedListNode[T, U] = self.cache[key]
_snake_case : Tuple = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(lowercase__ )
return node.val
self.miss += 1
return None
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> None:
"""simple docstring"""
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
_snake_case : Dict = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(lowercase__ ) is not None
                ) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
_snake_case : Optional[int] = DoubleLinkedListNode(lowercase__ , lowercase__ )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
_snake_case : Optional[Any] = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
_snake_case : Optional[Any] = value
self.list.add(lowercase__ )
@classmethod
def UpperCAmelCase_ ( cls , lowercase__ = 128 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
"""simple docstring"""
def cache_decorator_inner(lowercase__ ) -> Callable[..., U]:
def cache_decorator_wrapper(*lowercase__ ) -> U:
if func not in cls.decorator_function_to_instance_map:
_snake_case : Optional[Any] = LRUCache(lowercase__ )
_snake_case : Union[str, Any] = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
_snake_case : Tuple = func(*lowercase__ )
cls.decorator_function_to_instance_map[func].put(args[0] , lowercase__ )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(lowercase__ , '''cache_info''' , lowercase__ ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
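# Illustrative usage (an assumption: the classmethod above is exposed as `decorator`;
# names are masked in this listing):
#
#     @LRUCache.decorator(100)
#     def fib(num: int) -> int:
#         return num if num < 2 else fib(num - 1) + fib(num - 2)
#
#     fib(30)
#     print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, ...)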
if __name__ == "__main__":
import doctest
doctest.testmod()
| 47
| 0
|
'''simple docstring'''
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Optional[int] = len(lowerCAmelCase_ )
_snake_case : List[Any] = len(lowerCAmelCase_ )
_snake_case : Any = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
_snake_case : List[Any] = True
for i in range(lowerCAmelCase_ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
_snake_case : List[str] = True
if a[i].islower():
_snake_case : List[Any] = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713
|
'''simple docstring'''
import os
import numpy
import onnx
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : List[Any] = a.name
_snake_case : List[Any] = b.name
_snake_case : Tuple = ''''''
_snake_case : Tuple = ''''''
_snake_case : Optional[Any] = a == b
_snake_case : List[Any] = name_a
_snake_case : str = name_b
return res
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(lowerCAmelCase_ , lowerCAmelCase_ )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , lowerCAmelCase_ , lowerCAmelCase_ )
_graph_replace_input_with(node_proto.attribute[1].g , lowerCAmelCase_ , lowerCAmelCase_ )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , lowerCAmelCase_ , lowerCAmelCase_ )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
for n in graph_proto.node:
_node_replace_input_with(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Optional[Any] = list(model.graph.initializer )
_snake_case : List[str] = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
_snake_case : List[Any] = inits[i].name
_snake_case : List[str] = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , lowerCAmelCase_ , lowerCAmelCase_ )
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Tuple = os.path.dirname(lowerCAmelCase_ )
_snake_case : str = os.path.basename(lowerCAmelCase_ )
_snake_case : Tuple = onnx.load(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) )
_snake_case : Union[str, Any] = list(model.graph.initializer )
_snake_case : Union[str, Any] = set()
_snake_case : Any = {}
_snake_case : str = []
_snake_case : Union[str, Any] = 0
for i in range(len(lowerCAmelCase_ ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(lowerCAmelCase_ ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(lowerCAmelCase_ )
dup_set.add(lowerCAmelCase_ )
_snake_case : List[Any] = inits[j].data_type
_snake_case : Dict = numpy.prod(inits[j].dims )
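                # ONNX TensorProto data types: 1=FLOAT and 6=INT32 use 4 bytes per element, 7=INT64 and 11=DOUBLE use 8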
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('''unexpected data type: ''' , lowerCAmelCase_ )
total_reduced_size += mem_size
_snake_case : Union[str, Any] = inits[i].name
_snake_case : Any = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(lowerCAmelCase_ )
else:
_snake_case : Union[str, Any] = [name_j]
ind_to_replace.append((j, i) )
print('''total reduced size: ''' , total_reduced_size / 1_024 / 1_024 / 1_024 , '''GB''' )
_snake_case : List[str] = sorted(lowerCAmelCase_ )
_remove_dup_initializers_from_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case : List[str] = '''optimized_''' + model_file_name
_snake_case : List[Any] = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
onnx.save(lowerCAmelCase_ , lowerCAmelCase_ )
return new_model
| 47
| 0
|
'''simple docstring'''
print((lambda quine: quine % quine)('print((lambda quine: quine %% quine)(%r))'))
| 714
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase : int = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
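# _import_structure maps each submodule to its public names; _LazyModule below defers the heavy torch imports until first attribute access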
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Union[str, Any] = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 47
| 0
|
'''simple docstring'''
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Union[str, Any] = AutoConfig.from_pretrained(lowerCAmelCase_ )
_snake_case : Optional[int] = FlaxAutoModelForSeqaSeqLM.from_config(config=lowerCAmelCase_ )
_snake_case : Optional[Any] = checkpoints.load_tax_checkpoint(lowerCAmelCase_ )
_snake_case : str = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp''']
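    # a wi_0 kernel means the checkpoint uses the gated-GELU MLP of T5 v1.1 / LongT5, where wi is split into wi_0 and wi_1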
if config.model_type == "t5":
_snake_case : Union[str, Any] = '''SelfAttention'''
if config.model_type == "longt5" and config.encoder_attention_type == "local":
_snake_case : Tuple = '''LocalSelfAttention'''
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_snake_case : int = '''TransientGlobalSelfAttention'''
else:
raise ValueError(
            '''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5\'` with `encoder_attention_type`'''
            ''' attribute with a value from [\'local\', \'transient-global\'].''' )
# Encoder
for layer_index in range(config.num_layers ):
_snake_case : Optional[Any] = f'''layers_{str(lowerCAmelCase_ )}'''
# Self-Attention
_snake_case : Optional[int] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
_snake_case : List[str] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
_snake_case : List[Any] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
_snake_case : Optional[int] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_snake_case : Union[str, Any] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
# Layer Normalization
_snake_case : Dict = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
if split_mlp_wi:
_snake_case : List[str] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
_snake_case : Dict = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
_snake_case : Any = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
_snake_case : Any = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
_snake_case : str = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
_snake_case : int = flax_model.params['''encoder''']['''block'''][str(lowerCAmelCase_ )]['''layer''']
_snake_case : str = tax_attention_key
_snake_case : Dict = tax_attention_out
_snake_case : Optional[Any] = tax_attention_query
_snake_case : int = tax_attention_value
_snake_case : List[str] = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_snake_case : Tuple = tax_global_layer_norm
if split_mlp_wi:
_snake_case : List[Any] = tax_mlp_wi_a
_snake_case : Any = tax_mlp_wi_a
else:
_snake_case : int = tax_mlp_wi
_snake_case : Any = tax_mlp_wo
_snake_case : Optional[Any] = tax_mlp_layer_norm
_snake_case : Optional[Any] = flax_model_encoder_layer_block
# Only for layer 0:
_snake_case : Optional[Any] = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
_snake_case : List[Any] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_snake_case : List[str] = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
_snake_case : str = tax_encoder_global_rel_embedding
# Assigning
_snake_case : Tuple = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
_snake_case : str = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
_snake_case : Union[str, Any] = f'''layers_{str(lowerCAmelCase_ )}'''
# Self-Attention
_snake_case : Union[str, Any] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
_snake_case : Tuple = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
_snake_case : List[str] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
_snake_case : Union[str, Any] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
# Layer Normalization
_snake_case : Any = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
'''scale'''
]
# Encoder-Decoder-Attention
_snake_case : Optional[int] = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
_snake_case : Union[str, Any] = tax_enc_dec_attention_module['''key''']['''kernel''']
_snake_case : Optional[int] = tax_enc_dec_attention_module['''out''']['''kernel''']
_snake_case : Union[str, Any] = tax_enc_dec_attention_module['''query''']['''kernel''']
_snake_case : List[str] = tax_enc_dec_attention_module['''value''']['''kernel''']
# Layer Normalization
_snake_case : int = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
# MLP
if split_mlp_wi:
_snake_case : Optional[Any] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
_snake_case : List[Any] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
_snake_case : Optional[int] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
_snake_case : Optional[Any] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
_snake_case : Union[str, Any] = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
_snake_case : Optional[int] = flax_model.params['''decoder''']['''block'''][str(lowerCAmelCase_ )]['''layer''']
_snake_case : Tuple = tax_attention_key
_snake_case : Union[str, Any] = tax_attention_out
_snake_case : Union[str, Any] = tax_attention_query
_snake_case : Tuple = tax_attention_value
_snake_case : List[str] = tax_pre_attention_layer_norm
_snake_case : List[Any] = tax_enc_dec_attention_key
_snake_case : Optional[int] = tax_enc_dec_attention_out
_snake_case : List[str] = tax_enc_dec_attention_query
_snake_case : List[str] = tax_enc_dec_attention_value
_snake_case : List[str] = tax_cross_layer_norm
if split_mlp_wi:
_snake_case : List[Any] = tax_mlp_wi_a
_snake_case : List[Any] = tax_mlp_wi_a
else:
_snake_case : Any = tax_mlp_wi
_snake_case : Dict = tax_mlp_wo
        _snake_case : int = tax_mlp_layer_norm
_snake_case : Any = flax_model_decoder_layer_block
# Decoder Normalization
_snake_case : Optional[Any] = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
    _snake_case : List[Any] = tax_decoder_norm
# Only for layer 0:
_snake_case : Dict = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
_snake_case : int = tax_decoder_rel_embedding
# Token Embeddings
_snake_case : Any = tax_model['''target''']['''token_embedder''']['''embedding''']
    _snake_case : Any = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
_snake_case : Union[str, Any] = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
flax_model.save_pretrained(lowerCAmelCase_ )
    print('''T5X Model was successfully converted!''' )
if __name__ == "__main__":
UpperCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
    '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
UpperCAmelCase : List[Any] = parser.parse_args()
convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 715
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase : Dict = logging.get_logger(__name__)
class lowerCamelCase (a__ ):
_lowercase : int = ["""pixel_values"""]
def __init__( self , lowercase__ = True , lowercase__ = 32 , lowercase__=PILImageResampling.BILINEAR , lowercase__ = True , **lowercase__ , ) -> None:
"""simple docstring"""
_snake_case : Any = do_resize
_snake_case : List[str] = do_rescale
_snake_case : Any = size_divisor
_snake_case : Optional[Any] = resample
super().__init__(**lowercase__ )
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ ) -> np.ndarray:
"""simple docstring"""
_snake_case , _snake_case : Dict = get_image_size(lowercase__ )
# Rounds the height and width down to the closest multiple of size_divisor
_snake_case : Optional[int] = height // size_divisor * size_divisor
_snake_case : Dict = width // size_divisor * size_divisor
_snake_case : str = resize(lowercase__ , (new_h, new_w) , resample=lowercase__ , data_format=lowercase__ , **lowercase__ )
return image
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ ) -> np.ndarray:
"""simple docstring"""
return rescale(image=lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ )
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__=None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ) -> BatchFeature:
"""simple docstring"""
_snake_case : Any = do_resize if do_resize is not None else self.do_resize
_snake_case : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
_snake_case : List[str] = size_divisor if size_divisor is not None else self.size_divisor
_snake_case : int = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
_snake_case : Tuple = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
_snake_case : Tuple = [to_numpy_array(lowercase__ ) for img in images]
if do_resize:
_snake_case : Optional[int] = [self.resize(lowercase__ , size_divisor=lowercase__ , resample=lowercase__ ) for image in images]
if do_rescale:
_snake_case : Union[str, Any] = [self.rescale(lowercase__ , scale=1 / 255 ) for image in images]
_snake_case : Union[str, Any] = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images]
_snake_case : List[str] = {'''pixel_values''': images}
return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
| 47
| 0
|
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class lowerCamelCase :
def __init__( self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=99 , lowercase__=64 , lowercase__=5 , lowercase__=4 , lowercase__=37 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=512 , lowercase__=16 , lowercase__=2 , lowercase__=0.02 , lowercase__=3 , lowercase__=4 , lowercase__=None , ) -> int:
"""simple docstring"""
_snake_case : Any = parent
_snake_case : Any = batch_size
_snake_case : List[Any] = seq_length
_snake_case : Optional[int] = is_training
_snake_case : str = use_input_mask
_snake_case : Union[str, Any] = use_token_type_ids
_snake_case : Union[str, Any] = use_labels
_snake_case : List[str] = vocab_size
_snake_case : Union[str, Any] = hidden_size
_snake_case : Tuple = num_hidden_layers
_snake_case : List[str] = num_attention_heads
_snake_case : Any = intermediate_size
_snake_case : List[Any] = hidden_act
_snake_case : Optional[Any] = hidden_dropout_prob
_snake_case : Union[str, Any] = attention_probs_dropout_prob
_snake_case : List[Any] = max_position_embeddings
_snake_case : int = type_vocab_size
_snake_case : Union[str, Any] = type_sequence_label_size
_snake_case : Dict = initializer_range
_snake_case : Any = num_labels
_snake_case : List[str] = num_choices
_snake_case : List[Any] = scope
_snake_case : Dict = vocab_size - 1
def UpperCAmelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case : Dict = None
if self.use_input_mask:
_snake_case : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_snake_case : Any = None
if self.use_labels:
_snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case : Dict = self.get_config()
return config, input_ids, input_mask, token_labels
def UpperCAmelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase__ , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def UpperCAmelCase_ ( self ) -> int:
"""simple docstring"""
_snake_case : Optional[int] = self.prepare_config_and_inputs()
_snake_case : List[str] = True
return config, input_ids, input_mask, token_labels
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ ) -> int:
"""simple docstring"""
_snake_case : List[Any] = GPTNeoXModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
_snake_case : List[str] = model(lowercase__ , attention_mask=lowercase__ )
_snake_case : List[str] = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ ) -> int:
"""simple docstring"""
_snake_case : Union[str, Any] = True
_snake_case : int = GPTNeoXModel(lowercase__ )
model.to(lowercase__ )
model.eval()
_snake_case : str = model(lowercase__ , attention_mask=lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> List[Any]:
"""simple docstring"""
_snake_case : Optional[int] = GPTNeoXForCausalLM(config=lowercase__ )
model.to(lowercase__ )
model.eval()
_snake_case : List[str] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Dict:
"""simple docstring"""
_snake_case : Optional[Any] = self.num_labels
_snake_case : Dict = GPTNeoXForQuestionAnswering(lowercase__ )
model.to(lowercase__ )
model.eval()
_snake_case : int = model(lowercase__ , attention_mask=lowercase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> int:
"""simple docstring"""
_snake_case : Union[str, Any] = self.num_labels
_snake_case : Any = GPTNeoXForSequenceClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
_snake_case : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case : Union[str, Any] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> List[Any]:
"""simple docstring"""
_snake_case : List[Any] = self.num_labels
_snake_case : List[Any] = GPTNeoXForTokenClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
_snake_case : Any = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ ) -> int:
"""simple docstring"""
_snake_case : Dict = True
_snake_case : Dict = GPTNeoXForCausalLM(config=lowercase__ )
model.to(lowercase__ )
model.eval()
# first forward pass
_snake_case : Tuple = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__ )
_snake_case : Optional[int] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_snake_case : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
_snake_case : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_snake_case : int = torch.cat([input_ids, next_tokens] , dim=-1 )
_snake_case : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
_snake_case : Any = model(lowercase__ , attention_mask=lowercase__ , output_hidden_states=lowercase__ )
_snake_case : Dict = output_from_no_past['''hidden_states'''][0]
_snake_case : int = model(
lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__ , output_hidden_states=lowercase__ , )['''hidden_states'''][0]
# select random slice
_snake_case : Union[str, Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_snake_case : str = output_from_no_past[:, -3:, random_slice_idx].detach()
_snake_case : Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1E-3 ) )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_snake_case : List[Any] = self.prepare_config_and_inputs()
_snake_case : int = config_and_inputs
_snake_case : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase (a__ , a__ , a__ , unittest.TestCase ):
_lowercase : List[Any] = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
_lowercase : Optional[int] = (GPTNeoXForCausalLM,) if is_torch_available() else ()
_lowercase : Union[str, Any] = (
{
"""feature-extraction""": GPTNeoXModel,
"""question-answering""": GPTNeoXForQuestionAnswering,
"""text-classification""": GPTNeoXForSequenceClassification,
"""text-generation""": GPTNeoXForCausalLM,
"""token-classification""": GPTNeoXForTokenClassification,
"""zero-shot""": GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowercase : List[Any] = False
_lowercase : Optional[Any] = False
_lowercase : Optional[int] = False
_lowercase : Union[str, Any] = False
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_snake_case : Optional[int] = GPTNeoXModelTester(self )
_snake_case : Any = ConfigTester(self , config_class=lowercase__ , hidden_size=64 , num_attention_heads=8 )
def UpperCAmelCase_ ( self ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ) -> Dict:
"""simple docstring"""
_snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowercase__ , lowercase__ , lowercase__ )
def UpperCAmelCase_ ( self ) -> str:
"""simple docstring"""
_snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowercase__ , lowercase__ , lowercase__ )
def UpperCAmelCase_ ( self ) -> int:
"""simple docstring"""
_snake_case : int = self.model_tester.prepare_config_and_inputs_for_decoder()
_snake_case : Dict = None
self.model_tester.create_and_check_model_as_decoder(lowercase__ , lowercase__ , lowercase__ )
def UpperCAmelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowercase__ , lowercase__ , lowercase__ )
def UpperCAmelCase_ ( self ) -> int:
"""simple docstring"""
_snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowercase__ )
def UpperCAmelCase_ ( self ) -> Tuple:
"""simple docstring"""
_snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase__ )
def UpperCAmelCase_ ( self ) -> Tuple:
"""simple docstring"""
_snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase__ )
def UpperCAmelCase_ ( self ) -> Any:
"""simple docstring"""
_snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase__ )
@unittest.skip(reason='''Feed forward chunking is not implemented''' )
def UpperCAmelCase_ ( self ) -> str:
"""simple docstring"""
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def UpperCAmelCase_ ( self , lowercase__ ) -> List[str]:
"""simple docstring"""
_snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : List[str] = ids_tensor([1, 10] , config.vocab_size )
_snake_case : List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_snake_case : Tuple = GPTNeoXModel(lowercase__ )
original_model.to(lowercase__ )
original_model.eval()
_snake_case : Tuple = original_model(lowercase__ ).last_hidden_state
_snake_case : Any = original_model(lowercase__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_snake_case : str = {'''type''': scaling_type, '''factor''': 10.0}
_snake_case : int = GPTNeoXModel(lowercase__ )
scaled_model.to(lowercase__ )
scaled_model.eval()
_snake_case : List[str] = scaled_model(lowercase__ ).last_hidden_state
_snake_case : Dict = scaled_model(lowercase__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(lowercase__ , lowercase__ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowercase__ , lowercase__ , atol=1E-5 ) )
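# Minimal sketch (not part of the original test) of why the assertions above hold:
# linear RoPE scaling divides every position index by the factor before computing the
# rotary angles, so even short inputs produce different hidden states, while dynamic
# (NTK-style) scaling only kicks in past the original maximum length. The helper below
# uses illustrative names and is not the transformers API.
def _linear_rope_angle(position, pair_index, dim, base=10_000.0, factor=10.0):
    # rotary angle of one channel pair at a linearly rescaled position
    return (position / factor) / (base ** (2 * pair_index / dim))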
@require_torch
class lowerCamelCase (unittest.TestCase ):
@slow
def UpperCAmelCase_ ( self ) -> List[str]:
"""simple docstring"""
_snake_case : List[Any] = AutoTokenizer.from_pretrained('''EleutherAI/pythia-410m-deduped''' )
for checkpointing in [True, False]:
_snake_case : Tuple = GPTNeoXForCausalLM.from_pretrained('''EleutherAI/pythia-410m-deduped''' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(lowercase__ )
_snake_case : List[Any] = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(lowercase__ )
# The hub repo was updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
_snake_case : Union[str, Any] = '''My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'''
_snake_case : Optional[Any] = model.generate(**lowercase__ , do_sample=lowercase__ , max_new_tokens=20 )
_snake_case : List[Any] = tokenizer.batch_decode(lowercase__ )[0]
self.assertEqual(lowercase__ , lowercase__ )
| 716
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class lowerCamelCase :
_lowercase : Any = LEDConfig
_lowercase : Any = {}
_lowercase : Optional[Any] = """gelu"""
def __init__( self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=False , lowercase__=99 , lowercase__=32 , lowercase__=2 , lowercase__=4 , lowercase__=37 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=20 , lowercase__=2 , lowercase__=1 , lowercase__=0 , lowercase__=4 , ) -> Any:
"""simple docstring"""
_snake_case : Dict = parent
_snake_case : Any = batch_size
_snake_case : List[str] = seq_length
_snake_case : Union[str, Any] = is_training
_snake_case : Tuple = use_labels
_snake_case : int = vocab_size
_snake_case : str = hidden_size
_snake_case : Optional[Any] = num_hidden_layers
_snake_case : List[Any] = num_attention_heads
_snake_case : Optional[int] = intermediate_size
_snake_case : List[Any] = hidden_dropout_prob
_snake_case : List[str] = attention_probs_dropout_prob
_snake_case : Optional[int] = max_position_embeddings
_snake_case : Any = eos_token_id
_snake_case : List[Any] = pad_token_id
_snake_case : Optional[int] = bos_token_id
_snake_case : Any = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because each token's local attention only attends to `self.attention_window` surrounding positions plus the token itself
_snake_case : Any = self.attention_window + 2
# because of padding, `encoder_seq_length` is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_snake_case : Tuple = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
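# For example (illustrative values): seq_length=7 and attention_window=4 give
# encoder_seq_length = 7 + (4 - 7 % 4) % 4 = 8.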
def UpperCAmelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_snake_case : Tuple = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_snake_case : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 )
_snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case : List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_snake_case : Dict = prepare_led_inputs_dict(lowercase__ , lowercase__ , lowercase__ )
_snake_case : Dict = tf.concat(
[tf.zeros_like(lowercase__ )[:, :-1], tf.ones_like(lowercase__ )[:, -1:]] , axis=-1 , )
_snake_case : Dict = global_attention_mask
return config, inputs_dict
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> int:
"""simple docstring"""
_snake_case : int = TFLEDModel(config=lowercase__ ).get_decoder()
_snake_case : Union[str, Any] = inputs_dict['''input_ids''']
_snake_case : List[str] = input_ids[:1, :]
_snake_case : Tuple = inputs_dict['''attention_mask'''][:1, :]
_snake_case : Dict = 1
# first forward pass
_snake_case : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__ )
_snake_case , _snake_case : Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_snake_case : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
_snake_case : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and attention mask
_snake_case : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 )
_snake_case : List[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_snake_case : List[Any] = model(lowercase__ , attention_mask=lowercase__ )[0]
_snake_case : Tuple = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_snake_case : Tuple = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_snake_case : int = output_from_no_past[:, -3:, random_slice_idx]
_snake_case : Optional[Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase__ , lowercase__ , rtol=1E-3 )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , ):
"""simple docstring"""
if attention_mask is None:
_snake_case : Union[str, Any] = tf.cast(tf.math.not_equal(lowerCAmelCase_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_snake_case : str = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_snake_case : int = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_snake_case : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
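# Illustrative behaviour (toy values, not from the test suite): with pad_token_id=1 and
# input_ids=[[5, 6, 1]], the derived attention_mask is [[1, 1, 0]]; the decoder mask
# always keeps position 0 visible, so the decoder start token is never masked out.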
@require_tf
class lowerCamelCase (a__ , a__ , unittest.TestCase ):
_lowercase : Optional[int] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_lowercase : int = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
_lowercase : Dict = (
{
"""conversational""": TFLEDForConditionalGeneration,
"""feature-extraction""": TFLEDModel,
"""summarization""": TFLEDForConditionalGeneration,
"""text2text-generation""": TFLEDForConditionalGeneration,
"""translation""": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_lowercase : int = True
_lowercase : List[Any] = False
_lowercase : str = False
_lowercase : Union[str, Any] = False
def UpperCAmelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_snake_case : str = TFLEDModelTester(self )
_snake_case : Union[str, Any] = ConfigTester(self , config_class=lowercase__ )
def UpperCAmelCase_ ( self ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ) -> List[str]:
"""simple docstring"""
_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase__ )
def UpperCAmelCase_ ( self ) -> int:
"""simple docstring"""
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Any = tf.zeros_like(inputs_dict['''attention_mask'''] )
_snake_case : Optional[Any] = 2
_snake_case : Any = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
_snake_case : Dict = True
_snake_case : str = self.model_tester.seq_length
_snake_case : Dict = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowercase__ ):
_snake_case : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(lowercase__ ):
_snake_case : int = [t.numpy() for t in outputs.encoder_attentions]
_snake_case : Tuple = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_snake_case : Union[str, Any] = True
_snake_case : Dict = False
_snake_case : Union[str, Any] = False
_snake_case : List[Any] = model_class(lowercase__ )
_snake_case : Optional[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
_snake_case : List[Any] = len(lowercase__ )
self.assertEqual(config.output_hidden_states , lowercase__ )
check_encoder_attentions_output(lowercase__ )
if self.is_encoder_decoder:
_snake_case : Union[str, Any] = model_class(lowercase__ )
_snake_case : List[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
self.assertEqual(config.output_hidden_states , lowercase__ )
check_decoder_attentions_output(lowercase__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_snake_case : str = True
_snake_case : Tuple = model_class(lowercase__ )
_snake_case : int = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
self.assertEqual(config.output_hidden_states , lowercase__ )
check_encoder_attentions_output(lowercase__ )
# Check attention is always last and order is fine
_snake_case : int = True
_snake_case : List[str] = True
_snake_case : Tuple = model_class(lowercase__ )
_snake_case : Optional[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase__ ) )
self.assertEqual(model.config.output_hidden_states , lowercase__ )
check_encoder_attentions_output(lowercase__ )
@unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
def UpperCAmelCase_ ( self ) -> int:
"""simple docstring"""
pass
def UpperCAmelCase_ ( self ) -> str:
"""simple docstring"""
pass
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
return tf.constant(lowerCAmelCase_ , dtype=tf.intaa )
UpperCAmelCase : Dict = 1E-4
@slow
@require_tf
class lowerCamelCase (unittest.TestCase ):
def UpperCAmelCase_ ( self ) -> Dict:
"""simple docstring"""
_snake_case : List[str] = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
# change to intended input here
_snake_case : List[str] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : Tuple = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : Tuple = prepare_led_inputs_dict(model.config , lowercase__ , lowercase__ )
_snake_case : int = model(**lowercase__ )[0]
_snake_case : Dict = (1, 1_024, 768)
self.assertEqual(output.shape , lowercase__ )
# change to expected output here
_snake_case : List[Any] = tf.convert_to_tensor(
[[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1E-3 )
def UpperCAmelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_snake_case : Any = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
# change to intended input here
_snake_case : Dict = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : Dict = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : List[str] = prepare_led_inputs_dict(model.config , lowercase__ , lowercase__ )
_snake_case : Tuple = model(**lowercase__ )[0]
_snake_case : Any = (1, 1_024, model.config.vocab_size)
self.assertEqual(output.shape , lowercase__ )
# change to expected output here
_snake_case : Dict = tf.convert_to_tensor(
[[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1E-3 , rtol=1E-3 )
| 47
| 0
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
UpperCAmelCase : Tuple = logging.get_logger(__name__)
@dataclass
class lowerCamelCase (a__ ):
_lowercase : Any = [
"""no_inference""",
"""no_cuda""",
"""no_tpu""",
"""no_speed""",
"""no_memory""",
"""no_env_print""",
"""no_multi_process""",
]
def __init__( self , **lowercase__ ) -> List[Any]:
"""simple docstring"""
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
_snake_case : Optional[int] = deprecated_arg[3:]
setattr(self , lowercase__ , not kwargs.pop(lowercase__ ) )
logger.warning(
F'''{deprecated_arg} is deprecated. Please use --no_{positive_arg} or'''
F''' {positive_arg}={kwargs[positive_arg]}''' )
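# e.g. a deprecated ``no_cuda=True`` kwarg is translated to ``cuda=False`` above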
_snake_case : Optional[Any] = kwargs.pop('''torchscript''' , self.torchscript )
_snake_case : Dict = kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics )
_snake_case : Tuple = kwargs.pop('''fp16_opt_level''' , self.fpaa_opt_level )
super().__init__(**lowercase__ )
_lowercase : bool = field(default=a__ , metadata={"""help""": """Trace the models using torchscript"""} )
_lowercase : bool = field(default=a__ , metadata={"""help""": """Print Xla/PyTorch tpu metrics"""} )
_lowercase : str = field(
default="""O1""" , metadata={
"""help""": (
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. """
"""See details at https://nvidia.github.io/apex/amp.html"""
)
} , )
@cached_property
def UpperCAmelCase_ ( self ) -> Tuple["torch.device", int]:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
logger.info('''PyTorch: setting up devices''' )
if not self.cuda:
_snake_case : List[str] = torch.device('''cpu''' )
_snake_case : int = 0
elif is_torch_tpu_available():
_snake_case : Tuple = xm.xla_device()
_snake_case : int = 0
else:
_snake_case : Union[str, Any] = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
_snake_case : Optional[Any] = torch.cuda.device_count()
return device, n_gpu
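# Typical results: (torch.device("cuda"), torch.cuda.device_count()) on a GPU
# machine, (torch.device("cpu"), 0) when CUDA is disabled, and an XLA device on TPU.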
@property
def UpperCAmelCase_ ( self ) -> List[Any]:
"""simple docstring"""
return is_torch_tpu_available() and self.tpu
@property
def UpperCAmelCase_ ( self ) -> int:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def UpperCAmelCase_ ( self ) -> "torch.device":
"""simple docstring"""
requires_backends(self , ['''torch'''] )
return self._setup_devices[0]
@property
def UpperCAmelCase_ ( self ) -> Dict:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
return self._setup_devices[1]
@property
def UpperCAmelCase_ ( self ) -> Dict:
"""simple docstring"""
return self.n_gpu > 0
| 717
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase : List[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase : Any = {
'tokenizer_file': {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
},
}
UpperCAmelCase : Optional[Any] = {
'gpt-neox-20b': 2_0_4_8,
}
class lowerCamelCase (a__ ):
_lowercase : Optional[int] = VOCAB_FILES_NAMES
_lowercase : str = PRETRAINED_VOCAB_FILES_MAP
_lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : Optional[int] = ["""input_ids""", """attention_mask"""]
def __init__( self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__="<|endoftext|>" , lowercase__="<|endoftext|>" , lowercase__="<|endoftext|>" , lowercase__=False , **lowercase__ , ) -> List[Any]:
"""simple docstring"""
super().__init__(
lowercase__ , lowercase__ , tokenizer_file=lowercase__ , unk_token=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , add_prefix_space=lowercase__ , **lowercase__ , )
_snake_case : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , lowercase__ ) != add_prefix_space:
_snake_case : int = getattr(lowercase__ , pre_tok_state.pop('''type''' ) )
_snake_case : int = add_prefix_space
_snake_case : Optional[Any] = pre_tok_class(**lowercase__ )
_snake_case : List[str] = add_prefix_space
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None ) -> Tuple[str]:
"""simple docstring"""
_snake_case : Optional[int] = self._tokenizer.model.save(lowercase__ , name=lowercase__ )
return tuple(lowercase__ )
def UpperCAmelCase_ ( self , lowercase__ ) -> List[int]:
"""simple docstring"""
_snake_case : List[str] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowercase__ , add_special_tokens=lowercase__ ) + [self.eos_token_id] )
if len(lowercase__ ) > self.model_max_length:
_snake_case : Dict = input_ids[-self.model_max_length :]
return input_ids
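# Illustrative flow: each turn is encoded and terminated with the EOS token, so two
# turns become encode(t1) + [eos] + encode(t2) + [eos], and the result is truncated to
# model_max_length by keeping only the most recent tokens.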
| 47
| 0
|
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class lowerCamelCase (yaml.SafeLoader ):
def UpperCAmelCase_ ( self , lowercase__ ) -> Any:
"""simple docstring"""
_snake_case : int = [self.constructed_objects[key_node] for key_node, _ in node.value]
_snake_case : str = [tuple(lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else key for key in keys]
_snake_case : str = Counter(lowercase__ )
_snake_case : List[str] = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F'''Got duplicate yaml keys: {duplicate_keys}''' )
def UpperCAmelCase_ ( self , lowercase__ , lowercase__=False ) -> Tuple:
"""simple docstring"""
_snake_case : Tuple = super().construct_mapping(lowercase__ , deep=lowercase__ )
self._check_no_duplicates_on_constructed_node(lowercase__ )
return mapping
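# e.g. loading the yaml text "a: 1\na: 2" with this loader raises
# TypeError("Got duplicate yaml keys: ['a']") instead of silently keeping the last value.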
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : int = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
_snake_case : int = full_content[1:].index('''---''' ) + 1
_snake_case : Optional[Any] = '''\n'''.join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(lowerCAmelCase_ )
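# Example: a README starting with "---\nkey: value\n---\nbody text" yields the pair
# ("key: value", "body text"); a README with no front matter yields (None, full_text).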
class lowerCamelCase (a__ ):
# class attributes
_lowercase : Tuple = {"""train_eval_index"""} # train-eval-index in the YAML metadata
@classmethod
def UpperCAmelCase_ ( cls , lowercase__ ) -> "DatasetMetadata":
"""simple docstring"""
with open(lowercase__ , encoding='''utf-8''' ) as readme_file:
_snake_case : Tuple = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(lowercase__ )
else:
return cls()
def UpperCAmelCase_ ( self , lowercase__ ) -> Dict:
"""simple docstring"""
if path.exists():
with open(lowercase__ , encoding='''utf-8''' ) as readme_file:
_snake_case : Any = readme_file.read()
else:
_snake_case : int = None
_snake_case : Optional[int] = self._to_readme(lowercase__ )
with open(lowercase__ , '''w''' , encoding='''utf-8''' ) as readme_file:
readme_file.write(lowercase__ )
def UpperCAmelCase_ ( self , lowercase__ = None ) -> str:
"""simple docstring"""
if readme_content is not None:
_snake_case : Any = _split_yaml_from_readme(lowercase__ )
_snake_case : Tuple = '''---\n''' + self.to_yaml_string() + '''---\n''' + content
else:
_snake_case : int = '''---\n''' + self.to_yaml_string() + '''---\n'''
return full_content
@classmethod
def UpperCAmelCase_ ( cls , lowercase__ ) -> "DatasetMetadata":
"""simple docstring"""
_snake_case : Optional[int] = yaml.load(lowercase__ , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
_snake_case : Optional[Any] = {
(key.replace('''-''' , '''_''' ) if key.replace('''-''' , '''_''' ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**lowercase__ )
def UpperCAmelCase_ ( self ) -> str:
"""simple docstring"""
return yaml.safe_dump(
{
(key.replace('''_''' , '''-''' ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=lowercase__ , allow_unicode=lowercase__ , encoding='''utf-8''' , ).decode('''utf-8''' )
UpperCAmelCase : Optional[Any] = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
UpperCAmelCase : Optional[Any] = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
ap.add_argument('readme_filepath')
UpperCAmelCase : Optional[int] = ap.parse_args()
UpperCAmelCase : int = Path(args.readme_filepath)
UpperCAmelCase : Optional[int] = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 718
|
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num):
    """Compute Gamma(num) by numerically integrating x^(num - 1) * exp(-x) over [0, inf)."""
    if num <= 0:
        raise ValueError('''math domain error''')
    return quad(integrand, 0, inf, args=(num,))[0]
def integrand(x, z):
    """Integrand of the gamma function: x^(z - 1) * exp(-x)."""
    return math.pow(x, z - 1) * math.exp(-x)
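# For positive integers, gamma(n) equals (n - 1)!, e.g. gamma(5) evaluates to ~24.0.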
if __name__ == "__main__":
from doctest import testmod
testmod()
| 47
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase : Dict = {'configuration_timm_backbone': ['TimmBackboneConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Union[str, Any] = ['TimmBackbone']
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 719
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class lowerCamelCase (unittest.TestCase ):
@slow
def UpperCAmelCase_ ( self ) -> List[Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_snake_case : Union[str, Any] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Any = TFAutoModel.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : str = AutoModel.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> List[str]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_snake_case : Optional[Any] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Dict = TFAutoModelForPreTraining.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Tuple = AutoModelForPreTraining.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Optional[int] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : List[Any] = TFAutoModelForCausalLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
_snake_case , _snake_case : Tuple = TFAutoModelForCausalLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
_snake_case , _snake_case : Optional[Any] = AutoModelForCausalLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : List[Any] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Tuple = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Any:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
_snake_case , _snake_case : List[str] = TFAutoModelForMaskedLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : int = AutoModelForMaskedLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
_snake_case , _snake_case : Optional[int] = AutoModelForMaskedLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> List[str]:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
_snake_case , _snake_case : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
_snake_case , _snake_case : Dict = AutoModelForSeqaSeqLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Dict:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_snake_case : Any = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Any = TFAutoModelForSequenceClassification.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Dict = AutoModelForSequenceClassification.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_snake_case : str = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : str = TFAutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
def UpperCAmelCase_ ( self ) -> str:
"""simple docstring"""
_snake_case : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
_snake_case : Tuple = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
def UpperCAmelCase_ ( self ) -> str:
"""simple docstring"""
_snake_case : List[str] = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
_snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
| 47
| 0
|
'''simple docstring'''
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : int = current_set.copy()
for row_index, row in enumerate(lowerCAmelCase_ ):
_snake_case : List[Any] = row[0]
for column_index, column in enumerate(lowerCAmelCase_ ):
if magnitude == 0:
_snake_case : Dict = column
continue
_snake_case : Tuple = column / magnitude
# Subtract to cancel term
_snake_case : Any = current_set[0]
_snake_case : Optional[Any] = [first_row]
_snake_case : List[Any] = current_set[1::]
for row in current_set:
_snake_case : Optional[Any] = []
# If the first term is 0, the row is already in the form we want, so preserve it
if row[0] == 0:
final_set.append(lowerCAmelCase_ )
continue
for column_index in range(len(lowerCAmelCase_ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(lowerCAmelCase_ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
_snake_case : int = final_set[0]
_snake_case : List[Any] = []
_snake_case : str = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
_snake_case : int = simplify(lowerCAmelCase_ )
for i in range(len(lowerCAmelCase_ ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , lowerCAmelCase_ )
_snake_case : Union[str, Any] = resultant
return final_set
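# Worked example: simplify([[2, 4, 10], [1, 3, 7]]) normalises each row by its leading
# coefficient ([[1.0, 2.0, 5.0], [1.0, 3.0, 7.0]]) and subtracts to eliminate the first
# column, returning [[1.0, 2.0, 5.0], [0.0, -1.0, -2.0]].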
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
if len(lowerCAmelCase_ ) == 0:
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
_snake_case : int = len(lowerCAmelCase_ ) + 1
if any(len(lowerCAmelCase_ ) != _length for item in equations ):
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
for row in equations:
if any(not isinstance(lowerCAmelCase_ , (int, float) ) for column in row ):
raise ValueError('''solve_simultaneous() requires lists of integers''' )
if len(lowerCAmelCase_ ) == 1:
return [equations[0][-1] / equations[0][0]]
_snake_case : Tuple = equations.copy()
if any(0 in row for row in data_set ):
_snake_case : List[str] = data_set.copy()
_snake_case : str = []
for row_index, row in enumerate(lowerCAmelCase_ ):
if 0 not in row:
_snake_case : Optional[int] = data_set.pop(lowerCAmelCase_ )
break
if not full_row:
raise ValueError('''solve_simultaneous() requires at least 1 full equation''' )
data_set.insert(0 , lowerCAmelCase_ )
_snake_case : List[str] = data_set.copy()
_snake_case : Any = simplify(lowerCAmelCase_ )
_snake_case : str = simplified[::-1]
_snake_case : list = []
for row in simplified:
_snake_case : List[Any] = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
_snake_case : int = row.copy()[: len(lowerCAmelCase_ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(lowerCAmelCase_ ) == 0:
solutions.append(0 )
continue
_snake_case : str = temp_row[1::]
_snake_case : Any = temp_row[::-1]
for column_index, column in enumerate(lowerCAmelCase_ ):
current_solution -= column * solutions[column_index]
solutions.append(lowerCAmelCase_ )
_snake_case : Dict = []
for item in solutions:
final.append(float(round(lowerCAmelCase_ , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase : str = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 720
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase : Dict = {'configuration_timm_backbone': ['TimmBackboneConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Union[str, Any] = ['TimmBackbone']
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 47
| 0
|
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : List[str] = 384
if "tiny" in model_name:
_snake_case : Optional[int] = [3, 3, 9, 3]
_snake_case : Any = [96, 192, 384, 768]
if "small" in model_name:
_snake_case : Tuple = [3, 3, 27, 3]
_snake_case : Optional[int] = [96, 192, 384, 768]
if "base" in model_name:
_snake_case : str = [3, 3, 27, 3]
_snake_case : Any = [128, 256, 512, 1_024]
_snake_case : Optional[Any] = 512
if "large" in model_name:
_snake_case : Optional[int] = [3, 3, 27, 3]
_snake_case : List[str] = [192, 384, 768, 1_536]
_snake_case : Tuple = 768
if "xlarge" in model_name:
_snake_case : Optional[int] = [3, 3, 27, 3]
_snake_case : Union[str, Any] = [256, 512, 1_024, 2_048]
_snake_case : Optional[int] = 1_024
# set label information
_snake_case : Union[str, Any] = 150
_snake_case : Dict = '''huggingface/label-files'''
_snake_case : Optional[int] = '''ade20k-id2label.json'''
_snake_case : int = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type='''dataset''' ) , '''r''' ) )
_snake_case : List[str] = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
_snake_case : Union[str, Any] = {v: k for k, v in idalabel.items()}
_snake_case : List[str] = ConvNextConfig(
depths=lowerCAmelCase_ , hidden_sizes=lowerCAmelCase_ , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
_snake_case : str = UperNetConfig(
backbone_config=lowerCAmelCase_ , auxiliary_in_channels=lowerCAmelCase_ , num_labels=lowerCAmelCase_ , idalabel=lowerCAmelCase_ , labelaid=lowerCAmelCase_ , )
return config
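# e.g. get_upernet_config("upernet-convnext-tiny") produces a ConvNeXt backbone with
# depths [3, 3, 9, 3], hidden sizes [96, 192, 384, 768] and 150 ADE20k labels.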
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Optional[Any] = []
# fmt: off
# stem
rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') )
rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Any = dct.pop(lowerCAmelCase_ )
_snake_case : int = val
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Tuple = {
'''upernet-convnext-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth''',
'''upernet-convnext-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth''',
'''upernet-convnext-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth''',
'''upernet-convnext-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth''',
'''upernet-convnext-xlarge''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth''',
}
_snake_case : Optional[Any] = model_name_to_url[model_name]
_snake_case : List[Any] = torch.hub.load_state_dict_from_url(lowerCAmelCase_ , map_location='''cpu''' )['''state_dict''']
_snake_case : Dict = get_upernet_config(lowerCAmelCase_ )
_snake_case : List[str] = UperNetForSemanticSegmentation(lowerCAmelCase_ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
_snake_case : Optional[int] = state_dict.pop(lowerCAmelCase_ )
if "bn" in key:
_snake_case : List[Any] = key.replace('''bn''' , '''batch_norm''' )
_snake_case : Any = val
# rename keys
_snake_case : Optional[int] = create_rename_keys(lowerCAmelCase_ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
model.load_state_dict(lowerCAmelCase_ )
# verify on image
_snake_case : Optional[int] = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
_snake_case : Dict = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw ).convert('''RGB''' )
_snake_case : Any = SegformerImageProcessor()
_snake_case : int = processor(lowerCAmelCase_ , return_tensors='''pt''' ).pixel_values
with torch.no_grad():
_snake_case : Tuple = model(lowerCAmelCase_ )
if model_name == "upernet-convnext-tiny":
_snake_case : List[Any] = torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] )
elif model_name == "upernet-convnext-small":
_snake_case : Dict = torch.tensor(
[[-8.8_236, -8.8_236, -8.6_771], [-8.8_236, -8.8_236, -8.6_771], [-8.7_638, -8.7_638, -8.6_240]] )
elif model_name == "upernet-convnext-base":
_snake_case : str = torch.tensor(
[[-8.8_558, -8.8_558, -8.6_905], [-8.8_558, -8.8_558, -8.6_905], [-8.7_669, -8.7_669, -8.6_021]] )
elif model_name == "upernet-convnext-large":
_snake_case : Union[str, Any] = torch.tensor(
[[-8.6_660, -8.6_660, -8.6_210], [-8.6_660, -8.6_660, -8.6_210], [-8.6_310, -8.6_310, -8.5_964]] )
elif model_name == "upernet-convnext-xlarge":
_snake_case : List[Any] = torch.tensor(
[[-8.4_980, -8.4_980, -8.3_977], [-8.4_980, -8.4_980, -8.3_977], [-8.4_379, -8.4_379, -8.3_412]] )
print('''Logits:''' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCAmelCase_ , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCAmelCase_ )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowerCAmelCase_ )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[F"""upernet-convnext-{size}""" for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
UpperCAmelCase : int = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 721
|
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
UpperCAmelCase : Tuple = logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')
UpperCAmelCase : str = {
'base': AutoModel,
'sequence-classification': AutoModelForSequenceClassification,
'question-answering': AutoModelForQuestionAnswering,
'pretraining': AutoModelForPreTraining,
'token-classification': AutoModelForTokenClassification,
'language-modeling': AutoModelWithLMHead,
'summarization': AutoModelForSeqaSeqLM,
'translation': AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
UpperCAmelCase : Optional[Any] = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
UpperCAmelCase : Tuple = sorted(arg_to_scheduler.keys())
UpperCAmelCase : Optional[Any] = '{' + ', '.join(arg_to_scheduler_choices) + '}'
class lowerCamelCase (pl.LightningModule ):
def __init__( self , lowercase__ , lowercase__=None , lowercase__="base" , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ , ) -> Optional[int]:
"""simple docstring"""
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(lowercase__ )
_snake_case : Union[str, Any] = 0
_snake_case : int = Path(self.hparams.output_dir )
_snake_case : int = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
_snake_case : Tuple = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=lowercase__ , **lowercase__ , )
else:
_snake_case : PretrainedConfig = config
_snake_case : Optional[Any] = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(self.hparams , lowercase__ , lowercase__ ):
assert hasattr(self.config , lowercase__ ), F'''model config doesn\'t have a `{p}` attribute'''
setattr(self.config , lowercase__ , getattr(self.hparams , lowercase__ ) )
if tokenizer is None:
_snake_case : Optional[int] = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=lowercase__ , )
else:
_snake_case : PreTrainedTokenizer = tokenizer
_snake_case : Any = MODEL_MODES[mode]
if model is None:
_snake_case : List[Any] = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=lowercase__ , )
else:
_snake_case : Optional[Any] = model
def UpperCAmelCase_ ( self , *lowercase__ , **lowercase__ ) -> List[str]:
"""simple docstring"""
_snake_case : Dict = self.model_type.from_pretrained(*lowercase__ , **lowercase__ )
def UpperCAmelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_snake_case : Optional[int] = arg_to_scheduler[self.hparams.lr_scheduler]
_snake_case : Optional[int] = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
_snake_case : str = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1}
return scheduler
def UpperCAmelCase_ ( self ) -> int:
"""simple docstring"""
_snake_case : Any = self.model
_snake_case : List[Any] = ['''bias''', '''LayerNorm.weight''']
_snake_case : List[str] = [
{
'''params''': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # parameters that should receive weight decay
'''weight_decay''': self.hparams.weight_decay,
},
{
'''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'''weight_decay''': 0.0,
},
]
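# The two groups above apply weight decay to everything except biases and
# LayerNorm weights, which are conventionally excluded from decay.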
if self.hparams.adafactor:
_snake_case : Any = Adafactor(
lowercase__ , lr=self.hparams.learning_rate , scale_parameter=lowercase__ , relative_step=lowercase__ )
else:
_snake_case : List[str] = AdamW(
lowercase__ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
_snake_case : List[str] = optimizer
_snake_case : Any = self.get_lr_scheduler()
return [optimizer], [scheduler]
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Any:
"""simple docstring"""
return self.validation_step(lowercase__ , lowercase__ )
def UpperCAmelCase_ ( self , lowercase__ ) -> Tuple:
"""simple docstring"""
return self.validation_end(lowercase__ )
def UpperCAmelCase_ ( self ) -> int:
"""simple docstring"""
_snake_case : Any = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
_snake_case : Optional[int] = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
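# e.g. train_batch_size=32, accumulate_grad_batches=2 and gpus=2 give an effective
# batch size of 128, so total steps = (dataset_size / 128) * max_epochs.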
def UpperCAmelCase_ ( self , lowercase__ ) -> Any:
"""simple docstring"""
if stage == "test":
_snake_case : Any = len(self.test_dataloader().dataset )
else:
_snake_case : Dict = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=lowercase__ )
_snake_case : Optional[int] = len(self.train_dataloader().dataset )
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ = False ) -> str:
"""simple docstring"""
raise NotImplementedError('''You must implement this for your task''' )
def UpperCAmelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
return self.train_loader
def UpperCAmelCase_ ( self ) -> Dict:
"""simple docstring"""
return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=lowercase__ )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=lowercase__ )
def UpperCAmelCase_ ( self , lowercase__ ) -> Optional[int]:
"""simple docstring"""
return os.path.join(
self.hparams.data_dir , '''cached_{}_{}_{}'''.format(
lowercase__ , list(filter(lowercase__ , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
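# Example of the cache filename built above (hypothetical values): for mode
# "train", model_name_or_path "bert-base-cased" and max_seq_length 128 this
# returns "<data_dir>/cached_train_bert-base-cased_128"; the filter/split/pop
# chain keeps only the last path component of the model name.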
@pl.utilities.rank_zero_only
def UpperCAmelCase_ ( self , lowercase__ ) -> None:
"""simple docstring"""
_snake_case : Dict = self.output_dir.joinpath('''best_tfmr''' )
_snake_case : Tuple = self.step_count
self.model.save_pretrained(lowercase__ )
self.tokenizer.save_pretrained(lowercase__ )
@staticmethod
def UpperCAmelCase_ ( lowercase__ , lowercase__ ) -> Tuple:
"""simple docstring"""
parser.add_argument(
'''--model_name_or_path''' , default=lowercase__ , type=lowercase__ , required=lowercase__ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--config_name''' , default='''''' , type=lowercase__ , help='''Pretrained config name or path if not the same as model_name''' )
parser.add_argument(
'''--tokenizer_name''' , default=lowercase__ , type=lowercase__ , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument(
'''--cache_dir''' , default=str(Path(lowercase__ ).parent / '''test_run''' / '''cache''' ) , type=lowercase__ , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , )
parser.add_argument(
'''--encoder_layerdrop''' , type=lowercase__ , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--decoder_layerdrop''' , type=lowercase__ , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--dropout''' , type=lowercase__ , help='''Dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--attention_dropout''' , type=lowercase__ , help='''Attention dropout probability (Optional). Goes into model.config''' , )
parser.add_argument('''--learning_rate''' , default=5E-5 , type=lowercase__ , help='''The initial learning rate for Adam.''' )
parser.add_argument(
'''--lr_scheduler''' , default='''linear''' , choices=lowercase__ , metavar=lowercase__ , type=lowercase__ , help='''Learning rate scheduler''' , )
parser.add_argument('''--weight_decay''' , default=0.0 , type=lowercase__ , help='''Weight decay if we apply some.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=lowercase__ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--warmup_steps''' , default=0 , type=lowercase__ , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--num_workers''' , default=4 , type=lowercase__ , help='''kwarg passed to DataLoader''' )
parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=lowercase__ )
parser.add_argument('''--train_batch_size''' , default=32 , type=lowercase__ )
parser.add_argument('''--eval_batch_size''' , default=32 , type=lowercase__ )
parser.add_argument('''--adafactor''' , action='''store_true''' )
class lowerCamelCase (pl.Callback ):
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> str:
"""simple docstring"""
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class lowerCamelCase (pl.Callback ):
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> List[str]:
"""simple docstring"""
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(lowercase__ )
class lowerCamelCase (pl.Callback ):
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Any:
"""simple docstring"""
_snake_case : Any = trainer.lr_schedulers[0]['''scheduler''']
_snake_case : Optional[int] = {F'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(lowercase__ )
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> List[str]:
"""simple docstring"""
rank_zero_info('''***** Validation results *****''' )
_snake_case : Dict = trainer.callback_metrics
# Log results
for key in sorted(lowercase__ ):
if key not in ["log", "progress_bar"]:
rank_zero_info('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) )
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Dict:
"""simple docstring"""
rank_zero_info('''***** Test results *****''' )
_snake_case : Dict = trainer.callback_metrics
# Log and save results to file
_snake_case : str = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' )
with open(lowercase__ , '''w''' ) as writer:
for key in sorted(lowercase__ ):
if key not in ["log", "progress_bar"]:
rank_zero_info('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) )
writer.write('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
parser.add_argument(
'''--output_dir''' , default=str(Path(lowerCAmelCase_ ).parent / '''test_run''' / '''model_checkpoints''' ) , type=lowerCAmelCase_ , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=lowerCAmelCase_ , default='''O2''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=lowerCAmelCase_ )
parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=lowerCAmelCase_ , help='''Max gradient norm''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' )
parser.add_argument(
'''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=lowerCAmelCase_ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--seed''' , type=lowerCAmelCase_ , default=42 , help='''random seed for initialization''' )
parser.add_argument(
'''--data_dir''' , default=str(Path(lowerCAmelCase_ ).parent / '''test_run''' / '''dummy-train-data''' ) , type=lowerCAmelCase_ , help='''The input data dir. Should contain the training files for the CoNLL-2003 NER task.''' , )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=[] , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ):
"""simple docstring"""
pl.seed_everything(args.seed )
# init model
_snake_case : Union[str, Any] = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=lowerCAmelCase_ )
# add custom checkpoints
if checkpoint_callback is None:
_snake_case : Any = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(lowerCAmelCase_ )
if logging_callback is None:
_snake_case : str = LoggingCallback()
_snake_case : Tuple = {}
if args.fpaa:
_snake_case : Union[str, Any] = 16
if args.gpus > 1:
_snake_case : Optional[Any] = '''auto'''
_snake_case : Tuple = '''ddp'''
_snake_case : Optional[Any] = args.accumulate_grad_batches
_snake_case : Tuple = None
_snake_case : str = '''auto'''
_snake_case : int = pl.Trainer.from_argparse_args(
lowerCAmelCase_ , weights_summary=lowerCAmelCase_ , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=lowerCAmelCase_ , val_check_interval=1 , num_sanity_val_steps=2 , **lowerCAmelCase_ , )
if args.do_train:
trainer.fit(lowerCAmelCase_ )
else:
print('''RAG modeling tests with new set functions successfully executed!''' )
return trainer
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
UpperCAmelCase : Dict = 'true'
def _a ( lowerCAmelCase_ , lowerCAmelCase_=82 , lowerCAmelCase_=16 ):
"""simple docstring"""
set_seed(42 )
_snake_case : List[Any] = RegressionModel()
_snake_case : int = deepcopy(lowerCAmelCase_ )
_snake_case : Optional[int] = RegressionDataset(length=lowerCAmelCase_ )
_snake_case : Optional[Any] = DataLoader(lowerCAmelCase_ , batch_size=lowerCAmelCase_ )
model.to(accelerator.device )
_snake_case : str = accelerator.prepare(lowerCAmelCase_ , lowerCAmelCase_ )
return model, ddp_model, dataloader
def _a ( lowerCAmelCase_ , lowerCAmelCase_=False ):
"""simple docstring"""
_snake_case : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
_snake_case : str = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
def tokenize_function(lowerCAmelCase_ ):
_snake_case : str = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ )
return outputs
with accelerator.main_process_first():
_snake_case : int = dataset.map(
lowerCAmelCase_ , batched=lowerCAmelCase_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
_snake_case : Any = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(lowerCAmelCase_ ):
if use_longest:
return tokenizer.pad(lowerCAmelCase_ , padding='''longest''' , return_tensors='''pt''' )
return tokenizer.pad(lowerCAmelCase_ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return DataLoader(lowerCAmelCase_ , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=16 )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : List[Any] = Accelerator(dispatch_batches=lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
_snake_case : Dict = get_dataloader(lowerCAmelCase_ , not dispatch_batches )
_snake_case : List[Any] = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=lowerCAmelCase_ )
_snake_case : Tuple = accelerator.prepare(lowerCAmelCase_ , lowerCAmelCase_ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : str = []
for batch in dataloader:
_snake_case : int = batch.values()
with torch.no_grad():
_snake_case : List[str] = model(lowerCAmelCase_ )
_snake_case : Optional[int] = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
_snake_case , _snake_case : Union[str, Any] = [], []
for logit, targ in logits_and_targets:
logits.append(lowerCAmelCase_ )
targs.append(lowerCAmelCase_ )
_snake_case , _snake_case : Optional[int] = torch.cat(lowerCAmelCase_ ), torch.cat(lowerCAmelCase_ )
return logits, targs
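# Note on gather_for_metrics above: unlike a plain gather, it drops the
# duplicate samples that distributed samplers add to pad the final batch, so
# the concatenated logits/targets match the true dataset length.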
def _a ( lowerCAmelCase_ , lowerCAmelCase_=82 , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=16 ):
"""simple docstring"""
_snake_case : Optional[Any] = get_basic_setup(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case : Optional[int] = generate_predictions(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
assert (
len(lowerCAmelCase_ ) == num_samples
), f'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(lowerCAmelCase_ )}'''
def _a ( lowerCAmelCase_ = False , lowerCAmelCase_ = False ):
"""simple docstring"""
_snake_case : Any = evaluate.load('''glue''' , '''mrpc''' )
_snake_case : List[str] = get_mrpc_setup(lowerCAmelCase_ , lowerCAmelCase_ )
# First do baseline
_snake_case : List[Any] = setup['''no''']
model.to(lowerCAmelCase_ )
model.eval()
for batch in dataloader:
batch.to(lowerCAmelCase_ )
with torch.inference_mode():
_snake_case : Optional[Any] = model(**lowerCAmelCase_ )
_snake_case : Tuple = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=lowerCAmelCase_ , references=batch['''labels'''] )
_snake_case : Optional[int] = metric.compute()
# Then do distributed
_snake_case : Any = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
_snake_case : Dict = model(**lowerCAmelCase_ )
_snake_case : Tuple = outputs.logits.argmax(dim=-1 )
_snake_case : Union[str, Any] = batch['''labels''']
_snake_case : Optional[Any] = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )
_snake_case : Optional[int] = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def _a ( ):
"""simple docstring"""
_snake_case : List[str] = Accelerator(split_batches=lowerCAmelCase_ , dispatch_batches=lowerCAmelCase_ )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(lowerCAmelCase_ , lowerCAmelCase_ )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
_snake_case : List[Any] = Accelerator(split_batches=lowerCAmelCase_ , dispatch_batches=lowerCAmelCase_ )
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(lowerCAmelCase_ , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
_snake_case : Optional[int] = Accelerator()
test_torch_metrics(lowerCAmelCase_ , 512 )
accelerator.state._reset_state()
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : Dict = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class lowerCamelCase (a__ ):
_lowercase : List[str] = """sew-d"""
def __init__( self , lowercase__=32 , lowercase__=768 , lowercase__=12 , lowercase__=12 , lowercase__=3_072 , lowercase__=2 , lowercase__=512 , lowercase__=256 , lowercase__=True , lowercase__=True , lowercase__=("p2c", "c2p") , lowercase__="layer_norm" , lowercase__="gelu_python" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.1 , lowercase__=0.02 , lowercase__=1E-7 , lowercase__=1E-5 , lowercase__="group" , lowercase__="gelu" , lowercase__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowercase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase__=False , lowercase__=128 , lowercase__=16 , lowercase__=True , lowercase__=0.05 , lowercase__=10 , lowercase__=2 , lowercase__=0.0 , lowercase__=10 , lowercase__=0 , lowercase__="mean" , lowercase__=False , lowercase__=False , lowercase__=256 , lowercase__=0 , lowercase__=1 , lowercase__=2 , **lowercase__ , ) -> Dict:
"""simple docstring"""
super().__init__(**lowercase__ , pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ )
_snake_case : List[str] = hidden_size
_snake_case : Optional[Any] = feat_extract_norm
_snake_case : Tuple = feat_extract_activation
_snake_case : Tuple = list(lowercase__ )
_snake_case : Any = list(lowercase__ )
_snake_case : Any = list(lowercase__ )
_snake_case : Any = conv_bias
_snake_case : List[Any] = num_conv_pos_embeddings
_snake_case : Any = num_conv_pos_embedding_groups
_snake_case : Union[str, Any] = len(self.conv_dim )
_snake_case : Optional[Any] = num_hidden_layers
_snake_case : Optional[int] = intermediate_size
_snake_case : Any = squeeze_factor
_snake_case : Optional[Any] = max_position_embeddings
_snake_case : Tuple = position_buckets
_snake_case : Tuple = share_att_key
_snake_case : Any = relative_attention
_snake_case : Optional[int] = norm_rel_ebd
_snake_case : Optional[Any] = list(lowercase__ )
_snake_case : List[Any] = hidden_act
_snake_case : List[Any] = num_attention_heads
_snake_case : Dict = hidden_dropout
_snake_case : Tuple = attention_dropout
_snake_case : Union[str, Any] = activation_dropout
_snake_case : List[Any] = feat_proj_dropout
_snake_case : Optional[int] = final_dropout
_snake_case : Optional[Any] = layer_norm_eps
_snake_case : Dict = feature_layer_norm_eps
_snake_case : List[Any] = initializer_range
_snake_case : Dict = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_snake_case : Union[str, Any] = apply_spec_augment
_snake_case : Any = mask_time_prob
_snake_case : List[str] = mask_time_length
_snake_case : Dict = mask_time_min_masks
_snake_case : Union[str, Any] = mask_feature_prob
_snake_case : Tuple = mask_feature_length
_snake_case : Union[str, Any] = mask_feature_min_masks
# ctc loss
_snake_case : Optional[Any] = ctc_loss_reduction
_snake_case : Optional[Any] = ctc_zero_infinity
# sequence classification
_snake_case : List[Any] = use_weighted_layer_sum
_snake_case : Any = classifier_proj_size
@property
def UpperCAmelCase_ ( self ) -> Any:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
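# Example: with the default conv_stride (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
# this product is 5 * 2**6 = 320, i.e. one feature frame per 320 input samples.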
'''simple docstring'''
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : List[Any] = ''''''
for i in table:
res += inp[i - 1]
return res
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
return data[1:] + data[0]
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Union[str, Any] = ''''''
for i in range(len(lowerCAmelCase_ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Dict = int('''0b''' + data[0] + data[-1] , 2 )
_snake_case : Union[str, Any] = int('''0b''' + data[1:3] , 2 )
return bin(s[row][col] )[2:]
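# Worked example for the S-box lookup above: for data = "1011" the row is
# int("11", 2) = 3 (outer bits) and the column is int("01", 2) = 1 (inner
# bits), so the lookup returns bin(s[3][1])[2:].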
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : int = message[:4]
_snake_case : List[Any] = message[4:]
_snake_case : List[Any] = apply_table(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case : List[str] = xor(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case : Optional[Any] = apply_sbox(lowerCAmelCase_ , temp[:4] ) # noqa: E741
_snake_case : List[str] = apply_sbox(lowerCAmelCase_ , temp[4:] )
_snake_case : int = '''0''' * (2 - len(lowerCAmelCase_ )) + l # noqa: E741
_snake_case : Dict = '''0''' * (2 - len(lowerCAmelCase_ )) + r
_snake_case : List[Any] = apply_table(l + r , lowerCAmelCase_ )
_snake_case : Dict = xor(lowerCAmelCase_ , lowerCAmelCase_ )
return temp + right
if __name__ == "__main__":
UpperCAmelCase : Union[str, Any] = input('Enter 10 bit key: ')
UpperCAmelCase : Union[str, Any] = input('Enter 8 bit message: ')
UpperCAmelCase : str = [6, 3, 7, 4, 8, 5, 1_0, 9]
UpperCAmelCase : str = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6]
UpperCAmelCase : Dict = [2, 4, 3, 1]
UpperCAmelCase : str = [2, 6, 3, 1, 4, 8, 5, 7]
UpperCAmelCase : Union[str, Any] = [4, 1, 3, 5, 7, 2, 8, 6]
UpperCAmelCase : List[str] = [4, 1, 2, 3, 2, 3, 4, 1]
UpperCAmelCase : Dict = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
UpperCAmelCase : Optional[int] = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
UpperCAmelCase : Dict = apply_table(key, paa_table)
UpperCAmelCase : Optional[int] = temp[:5]
UpperCAmelCase : Dict = temp[5:]
UpperCAmelCase : int = left_shift(left)
UpperCAmelCase : str = left_shift(right)
UpperCAmelCase : Dict = apply_table(left + right, pa_table)
UpperCAmelCase : str = left_shift(left)
UpperCAmelCase : str = left_shift(right)
UpperCAmelCase : List[str] = left_shift(left)
UpperCAmelCase : Union[str, Any] = left_shift(right)
UpperCAmelCase : Optional[Any] = apply_table(left + right, pa_table)
# encryption
UpperCAmelCase : Tuple = apply_table(message, IP)
UpperCAmelCase : Dict = function(expansion, sa, sa, keya, temp)
UpperCAmelCase : List[Any] = temp[4:] + temp[:4]
UpperCAmelCase : Union[str, Any] = function(expansion, sa, sa, keya, temp)
UpperCAmelCase : Union[str, Any] = apply_table(temp, IP_inv)
print('Cipher text is:', CT)
# decryption
UpperCAmelCase : List[Any] = apply_table(CT, IP)
UpperCAmelCase : Union[str, Any] = function(expansion, sa, sa, keya, temp)
UpperCAmelCase : Optional[int] = temp[4:] + temp[:4]
UpperCAmelCase : Union[str, Any] = function(expansion, sa, sa, keya, temp)
UpperCAmelCase : Optional[Any] = apply_table(temp, IP_inv)
print('Plain text after decrypting is:', PT)
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : List[Any] = 0
if start < end:
_snake_case : List[Any] = randint(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case : Any = a[end]
_snake_case : List[str] = a[pivot]
_snake_case : Optional[int] = temp
_snake_case , _snake_case : List[Any] = _in_place_partition(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
count += _in_place_quick_sort(lowerCAmelCase_ , lowerCAmelCase_ , p - 1 )
count += _in_place_quick_sort(lowerCAmelCase_ , p + 1 , lowerCAmelCase_ )
return count
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Optional[Any] = 0
_snake_case : Optional[int] = randint(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case : Tuple = a[end]
_snake_case : Optional[Any] = a[pivot]
_snake_case : Union[str, Any] = temp
_snake_case : Union[str, Any] = start - 1
for index in range(lowerCAmelCase_ , lowerCAmelCase_ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
_snake_case : Optional[int] = new_pivot_index + 1
_snake_case : Optional[Any] = a[new_pivot_index]
_snake_case : Tuple = a[index]
_snake_case : str = temp
_snake_case : Any = a[new_pivot_index + 1]
_snake_case : str = a[end]
_snake_case : Optional[int] = temp
return new_pivot_index + 1, count
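# A minimal usage sketch of the two functions above (hypothetical array;
# assumes the partition's return values are bound as `p, count`):
#
# import numpy as np
# data = np.array([3, 1, 2])
# comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
# # `data` is now sorted in place; `comparisons` counts element comparisons.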
UpperCAmelCase : Dict = TemporaryFile()
UpperCAmelCase : Dict = 1_0_0 # 100 elements are to be sorted
UpperCAmelCase, UpperCAmelCase : str = 0, 1 # mean and standard deviation
UpperCAmelCase : Optional[Any] = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)
outfile.seek(0) # using the same array
UpperCAmelCase : int = np.load(outfile)
UpperCAmelCase : Optional[int] = len(M) - 1
UpperCAmelCase : str = _in_place_quick_sort(M, 0, r)
print(
'Number of comparisons for 100 elements selected from a standard normal distribution '
'is:'
)
print(z)
'''simple docstring'''
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
UpperCAmelCase : Union[str, Any] = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 2048-bit
1_4: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 3072-bit
1_5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 4096-bit
1_6: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 6144-bit
1_7: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 8192-bit
1_8: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
}
class lowerCamelCase :
def __init__( self , lowercase__ = 14 ) -> None:
"""simple docstring"""
if group not in primes:
raise ValueError('''Unsupported Group''' )
_snake_case : Any = primes[group]['''prime''']
_snake_case : Union[str, Any] = primes[group]['''generator''']
_snake_case : Union[str, Any] = int(hexlify(urandom(32 ) ) , base=16 )
def UpperCAmelCase_ ( self ) -> str:
"""simple docstring"""
return hex(self.__private_key )[2:]
def UpperCAmelCase_ ( self ) -> str:
"""simple docstring"""
_snake_case : Tuple = pow(self.generator , self.__private_key , self.prime )
return hex(lowercase__ )[2:]
def UpperCAmelCase_ ( self , lowercase__ ) -> bool:
"""simple docstring"""
return (
2 <= key <= self.prime - 2
and pow(lowercase__ , (self.prime - 1) // 2 , self.prime ) == 1
)
def UpperCAmelCase_ ( self , lowercase__ ) -> str:
"""simple docstring"""
_snake_case : str = int(lowercase__ , base=16 )
if not self.is_valid_public_key(lowercase__ ):
raise ValueError('''Invalid public key''' )
_snake_case : int = pow(lowercase__ , self.__private_key , self.prime )
return shaaaa(str(lowercase__ ).encode() ).hexdigest()
@staticmethod
def UpperCAmelCase_ ( lowercase__ , lowercase__ ) -> bool:
"""simple docstring"""
return (
2 <= remote_public_key_str <= prime - 2
and pow(lowercase__ , (prime - 1) // 2 , lowercase__ ) == 1
)
@staticmethod
def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ = 14 ) -> str:
"""simple docstring"""
_snake_case : List[Any] = int(lowercase__ , base=16 )
_snake_case : str = int(lowercase__ , base=16 )
_snake_case : Union[str, Any] = primes[group]['''prime''']
if not DiffieHellman.is_valid_public_key_static(lowercase__ , lowercase__ ):
raise ValueError('''Invalid public key''' )
_snake_case : List[str] = pow(lowercase__ , lowercase__ , lowercase__ )
return shaaaa(str(lowercase__ ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
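# A minimal two-party exchange sketch (assumes the original, hypothetical
# names DiffieHellman / generate_public_key / generate_shared_key for the
# class and methods above):
#
# alice = DiffieHellman(group=14)
# bob = DiffieHellman(group=14)
# shared_a = alice.generate_shared_key(bob.generate_public_key())
# shared_b = bob.generate_shared_key(alice.generate_public_key())
# assert shared_a == shared_b  # both sides derive the same hashed secret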
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : str = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
_snake_case : Optional[Any] = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
_snake_case : Optional[Any] = 4
_snake_case : Tuple = 48
_snake_case : Optional[Any] = '''pixelshuffle_aux'''
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
_snake_case : Any = [6, 6, 6, 6]
_snake_case : Optional[Any] = 60
_snake_case : Optional[Any] = [6, 6, 6, 6]
_snake_case : str = '''pixelshuffledirect'''
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
_snake_case : Optional[int] = 4
_snake_case : Any = '''nearest+conv'''
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
_snake_case : str = 1
_snake_case : List[Any] = 1
_snake_case : Optional[int] = 126
_snake_case : List[str] = 7
_snake_case : List[str] = 255.0
_snake_case : Optional[int] = ''''''
return config
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
if "patch_embed.proj" in name and "layers" not in name:
_snake_case : List[str] = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
_snake_case : Optional[Any] = name.replace('''patch_embed.norm''' , '''embeddings.patch_embeddings.layernorm''' )
if "layers" in name:
_snake_case : List[str] = name.replace('''layers''' , '''encoder.stages''' )
if "residual_group.blocks" in name:
_snake_case : Union[str, Any] = name.replace('''residual_group.blocks''' , '''layers''' )
if "attn.proj" in name:
_snake_case : Optional[Any] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
_snake_case : int = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
_snake_case : Union[str, Any] = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
_snake_case : List[str] = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
_snake_case : Dict = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
_snake_case : List[str] = name.replace('''mlp.fc2''' , '''output.dense''' )
if "q_bias" in name:
_snake_case : Optional[Any] = name.replace('''q_bias''' , '''query.bias''' )
if "k_bias" in name:
_snake_case : Any = name.replace('''k_bias''' , '''key.bias''' )
if "v_bias" in name:
_snake_case : Tuple = name.replace('''v_bias''' , '''value.bias''' )
if "cpb_mlp" in name:
_snake_case : Optional[Any] = name.replace('''cpb_mlp''' , '''continuous_position_bias_mlp''' )
if "patch_embed.proj" in name:
_snake_case : Dict = name.replace('''patch_embed.proj''' , '''patch_embed.projection''' )
if name == "norm.weight":
_snake_case : List[Any] = '''layernorm.weight'''
if name == "norm.bias":
_snake_case : Optional[Any] = '''layernorm.bias'''
if "conv_first" in name:
_snake_case : Optional[int] = name.replace('''conv_first''' , '''first_convolution''' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
_snake_case : int = name.replace('''conv_last''' , '''final_convolution''' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
_snake_case : List[Any] = name.replace('''conv_before_upsample.0''' , '''conv_before_upsample''' )
if "upsample.0" in name:
_snake_case : int = name.replace('''upsample.0''' , '''upsample.convolution_0''' )
if "upsample.2" in name:
_snake_case : List[str] = name.replace('''upsample.2''' , '''upsample.convolution_1''' )
_snake_case : List[Any] = '''upsample.''' + name
elif config.upsampler == "pixelshuffledirect":
_snake_case : Union[str, Any] = name.replace('''upsample.0.weight''' , '''upsample.conv.weight''' )
_snake_case : List[Any] = name.replace('''upsample.0.bias''' , '''upsample.conv.bias''' )
else:
pass
else:
_snake_case : int = '''swin2sr.''' + name
return name
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_snake_case : Optional[int] = orig_state_dict.pop(lowerCAmelCase_ )
if "qkv" in key:
_snake_case : List[Any] = key.split('''.''' )
_snake_case : str = int(key_split[1] )
_snake_case : List[str] = int(key_split[4] )
_snake_case : List[Any] = config.embed_dim
if "weight" in key:
_snake_case : Any = val[:dim, :]
_snake_case : Any = val[dim : dim * 2, :]
_snake_case : Any = val[-dim:, :]
else:
_snake_case : Any = val[:dim]
_snake_case : Optional[Any] = val[dim : dim * 2]
_snake_case : Optional[int] = val[-dim:]
pass
else:
_snake_case : Dict = val
return orig_state_dict
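# Sketch of the row-wise qkv split performed above (hypothetical shapes): a
# fused qkv weight of shape (3 * dim, dim) is sliced into query/key/value.
#
# import torch
# dim = 4
# qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
# q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
# assert q.shape == k.shape == v.shape == (dim, dim)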
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Tuple = get_config(lowerCAmelCase_ )
_snake_case : Optional[Any] = SwinaSRForImageSuperResolution(lowerCAmelCase_ )
model.eval()
_snake_case : Optional[int] = torch.hub.load_state_dict_from_url(lowerCAmelCase_ , map_location='''cpu''' )
_snake_case : int = convert_state_dict(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case : Tuple = model.load_state_dict(lowerCAmelCase_ , strict=lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 0:
raise ValueError('''Missing keys when converting: {}'''.format(lowerCAmelCase_ ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f'''Unexpected key {key} in state_dict''' )
# verify values
_snake_case : List[str] = '''https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'''
_snake_case : Union[str, Any] = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw ).convert('''RGB''' )
_snake_case : List[Any] = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
_snake_case : Optional[Any] = 126 if '''Jpeg''' in checkpoint_url else 256
_snake_case : Tuple = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
_snake_case : Union[str, Any] = transforms(lowerCAmelCase_ ).unsqueeze(0 )
if config.num_channels == 1:
_snake_case : Optional[int] = pixel_values[:, 0, :, :].unsqueeze(1 )
_snake_case : Optional[int] = model(lowerCAmelCase_ )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
_snake_case : Tuple = torch.Size([1, 3, 512, 512] )
_snake_case : Union[str, Any] = torch.tensor(
[[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
_snake_case : Dict = torch.Size([1, 3, 1_024, 1_024] )
_snake_case : Dict = torch.tensor(
[[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
_snake_case : Tuple = torch.Size([1, 3, 1_024, 1_024] )
_snake_case : Any = torch.tensor(
[[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
_snake_case : List[str] = torch.Size([1, 3, 512, 512] )
_snake_case : Optional[Any] = torch.tensor(
[[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
_snake_case : Dict = torch.Size([1, 3, 1_024, 1_024] )
_snake_case : str = torch.tensor(
[[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] )
assert (
outputs.reconstruction.shape == expected_shape
), f'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'''
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , lowerCAmelCase_ , atol=1E-3 )
print('''Looks ok!''' )
_snake_case : Dict = {
'''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''': (
'''swin2SR-classical-sr-x2-64'''
),
'''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth''': (
'''swin2SR-classical-sr-x4-64'''
),
'''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth''': (
'''swin2SR-compressed-sr-x4-48'''
),
'''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth''': (
'''swin2SR-lightweight-x2-64'''
),
'''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth''': (
'''swin2SR-realworld-sr-x4-64-bsrgan-psnr'''
),
}
_snake_case : Optional[Any] = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCAmelCase_ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowerCAmelCase_ )
if push_to_hub:
model.push_to_hub(f'''caidas/{model_name}''' )
processor.push_to_hub(f'''caidas/{model_name}''' )
if __name__ == "__main__":
UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth',
type=str,
help='URL of the original Swin2SR checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the converted model to the hub.')
UpperCAmelCase : Dict = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def _a ( ):
"""simple docstring"""
_snake_case : List[Any] = ArgumentParser('''Transformers CLI tool''' , usage='''transformers-cli <command> [<args>]''' )
_snake_case : List[str] = parser.add_subparsers(help='''transformers-cli command helpers''' )
# Register commands
ConvertCommand.register_subcommand(lowerCAmelCase_ )
DownloadCommand.register_subcommand(lowerCAmelCase_ )
EnvironmentCommand.register_subcommand(lowerCAmelCase_ )
RunCommand.register_subcommand(lowerCAmelCase_ )
ServeCommand.register_subcommand(lowerCAmelCase_ )
UserCommands.register_subcommand(lowerCAmelCase_ )
AddNewModelCommand.register_subcommand(lowerCAmelCase_ )
AddNewModelLikeCommand.register_subcommand(lowerCAmelCase_ )
LfsCommands.register_subcommand(lowerCAmelCase_ )
PTtoTFCommand.register_subcommand(lowerCAmelCase_ )
# Let's go
_snake_case : str = parser.parse_args()
if not hasattr(lowerCAmelCase_ , '''func''' ):
parser.print_help()
exit(1 )
# Run
_snake_case : Union[str, Any] = args.func(lowerCAmelCase_ )
service.run()
if __name__ == "__main__":
main()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase : str = {
'configuration_nezha': ['NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NezhaConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Dict = [
'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST',
'NezhaForNextSentencePrediction',
'NezhaForMaskedLM',
'NezhaForPreTraining',
'NezhaForMultipleChoice',
'NezhaForQuestionAnswering',
'NezhaForSequenceClassification',
'NezhaForTokenClassification',
'NezhaModel',
'NezhaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
from collections.abc import Generator
def _a ( ):
"""simple docstring"""
_snake_case , _snake_case : Union[str, Any] = 0, 1
while True:
_snake_case , _snake_case : List[str] = b, a + b
yield b
def _a ( lowerCAmelCase_ = 1_000 ):
"""simple docstring"""
_snake_case : List[str] = 1
_snake_case : Dict = fibonacci_generator()
while len(str(next(lowerCAmelCase_ ) ) ) < n:
answer += 1
return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
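# Worked check: solution(3) == 12, since 144 = F(12) is the first Fibonacci
# number with three digits; Project Euler problem 25 asks for n = 1000.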
'''simple docstring'''
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
class lowerCamelCase (a__ ):
def __init__( self , **lowercase__ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ['''bs4'''] )
super().__init__(**lowercase__ )
def UpperCAmelCase_ ( self , lowercase__ ) -> Dict:
"""simple docstring"""
_snake_case : Optional[int] = []
_snake_case : List[str] = []
_snake_case : List[Any] = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
_snake_case : List[Any] = parent.find_all(child.name , recursive=lowercase__ )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(lowercase__ ) else next(i for i, s in enumerate(lowercase__ , 1 ) if s is child ) )
_snake_case : Any = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def UpperCAmelCase_ ( self , lowercase__ ) -> Optional[int]:
"""simple docstring"""
_snake_case : Optional[int] = BeautifulSoup(lowercase__ , '''html.parser''' )
_snake_case : List[str] = []
_snake_case : Tuple = []
_snake_case : Any = []
for element in html_code.descendants:
if type(lowercase__ ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
_snake_case : str = html.unescape(lowercase__ ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(lowercase__ )
_snake_case : List[str] = self.xpath_soup(lowercase__ )
stringaxtag_seq.append(lowercase__ )
stringaxsubs_seq.append(lowercase__ )
if len(lowercase__ ) != len(lowercase__ ):
raise ValueError('''Number of doc strings and xtags does not correspond''' )
if len(lowercase__ ) != len(lowercase__ ):
raise ValueError('''Number of doc strings and xsubs does not correspond''' )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Union[str, Any]:
"""simple docstring"""
_snake_case : Tuple = ''''''
for tagname, subs in zip(lowercase__ , lowercase__ ):
xpath += F'''/{tagname}'''
if subs != 0:
xpath += F'''[{subs}]'''
return xpath
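# Example: construct_xpath(["html", "body", "div"], [0, 0, 2]) returns
# "/html/body/div[2]"; a subscript of 0 means the tag had no same-name siblings.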
def __call__( self , lowercase__ ) -> BatchFeature:
"""simple docstring"""
_snake_case : Dict = False
# Check that strings has a valid type
if isinstance(lowercase__ , lowercase__ ):
_snake_case : str = True
elif isinstance(lowercase__ , (list, tuple) ):
if len(lowercase__ ) == 0 or isinstance(html_strings[0] , lowercase__ ):
_snake_case : Optional[Any] = True
if not valid_strings:
raise ValueError(
'''HTML strings must be of type `str`, `List[str]` (batch of examples), '''
F'''but is of type {type(lowercase__ )}.''' )
_snake_case : Any = bool(isinstance(lowercase__ , (list, tuple) ) and (isinstance(html_strings[0] , lowercase__ )) )
if not is_batched:
_snake_case : Optional[Any] = [html_strings]
# Get nodes + xpaths
_snake_case : Any = []
_snake_case : str = []
for html_string in html_strings:
_snake_case : Union[str, Any] = self.get_three_from_single(lowercase__ )
nodes.append(lowercase__ )
_snake_case : Union[str, Any] = []
for node, tag_list, sub_list in zip(lowercase__ , lowercase__ , lowercase__ ):
_snake_case : Union[str, Any] = self.construct_xpath(lowercase__ , lowercase__ )
xpath_strings.append(lowercase__ )
xpaths.append(lowercase__ )
# return as Dict
_snake_case : Optional[int] = {'''nodes''': nodes, '''xpaths''': xpaths}
_snake_case : Tuple = BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
return encoded_inputs
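# A minimal usage sketch (assumes the original, hypothetical class name
# MarkupLMFeatureExtractor for the class above):
#
# extractor = MarkupLMFeatureExtractor()
# encoding = extractor("<html><body><p>Hello</p></body></html>")
# # encoding["nodes"] ~ [["Hello"]], encoding["xpaths"] ~ [["/html/body/p"]]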
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
UpperCAmelCase : str = logging.getLogger(__name__)
UpperCAmelCase : Dict = 5_0 # max width of layer names
UpperCAmelCase : Union[str, Any] = 7_0 # max width of quantizer names
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Dict = parser.add_argument_group('''quant_trainer arguments''' )
group.add_argument('''--wprec''' , type=lowerCAmelCase_ , default=8 , help='''weight precision''' )
group.add_argument('''--aprec''' , type=lowerCAmelCase_ , default=8 , help='''activation precision''' )
group.add_argument('''--quant-per-tensor''' , action='''store_true''' , help='''per tensor weight scaling''' )
group.add_argument('''--quant-disable''' , action='''store_true''' , help='''disable all quantizers''' )
group.add_argument('''--quant-disable-embeddings''' , action='''store_true''' , help='''disable all embeddings quantizers''' )
group.add_argument('''--quant-disable-keyword''' , type=lowerCAmelCase_ , nargs='''+''' , help='''disable quantizers by keyword''' )
group.add_argument('''--quant-disable-layer-module''' , type=lowerCAmelCase_ , help='''disable quantizers by keyword under layer.''' )
group.add_argument('''--quant-enable-layer-module''' , type=lowerCAmelCase_ , help='''enable quantizers by keyword under layer''' )
group.add_argument('''--calibrator''' , default='''max''' , help='''which quantization range calibrator to use''' )
group.add_argument('''--percentile''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , help='''percentile for PercentileCalibrator''' )
group.add_argument('''--fuse-qkv''' , action='''store_true''' , help='''use the same scale factor for qkv''' )
group.add_argument('''--clip-gelu''' , metavar='''N''' , type=lowerCAmelCase_ , help='''clip gelu output maximum value to N''' )
group.add_argument(
'''--recalibrate-weights''' , action='''store_true''' , help=(
'''recalibrate weight amaxes by taking the max of the weights.'''
''' amaxes will be computed with the current quantization granularity (axis).'''
) , )
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
if args.calibrator == "max":
_snake_case : Optional[int] = '''max'''
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError('''Specify --percentile when using percentile calibrator''' )
_snake_case : Tuple = '''histogram'''
elif args.calibrator == "mse":
_snake_case : int = '''histogram'''
else:
raise ValueError(f'''Invalid calibrator {args.calibrator}''' )
_snake_case : Tuple = QuantDescriptor(num_bits=args.aprec , calib_method=lowerCAmelCase_ )
_snake_case : str = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(lowerCAmelCase_ )
quant_nn.QuantLinear.set_default_quant_desc_weight(lowerCAmelCase_ )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=False ):
"""simple docstring"""
logger.info('''Configuring Model for Quantization''' )
logger.info(f'''using quantization package {pytorch_quantization.__file__}''' )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(lowerCAmelCase_ , ['''embeddings'''] , which='''weight''' , _disabled=lowerCAmelCase_ )
if args.quant_disable:
set_quantizer_by_name(lowerCAmelCase_ , [''''''] , _disabled=lowerCAmelCase_ )
if args.quant_disable_keyword:
set_quantizer_by_name(lowerCAmelCase_ , args.quant_disable_keyword , _disabled=lowerCAmelCase_ )
if args.quant_disable_layer_module:
set_quantizer_by_name(lowerCAmelCase_ , [R'''layer.\d+.''' + args.quant_disable_layer_module] , _disabled=lowerCAmelCase_ )
if args.quant_enable_layer_module:
set_quantizer_by_name(lowerCAmelCase_ , [R'''layer.\d+.''' + args.quant_enable_layer_module] , _disabled=lowerCAmelCase_ )
if args.recalibrate_weights:
recalibrate_weights(lowerCAmelCase_ )
if args.fuse_qkv:
fuse_qkv(lowerCAmelCase_ , lowerCAmelCase_ )
if args.clip_gelu:
clip_gelu(lowerCAmelCase_ , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(lowerCAmelCase_ )
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
logger.info('''Enabling Calibration''' )
for name, module in model.named_modules():
if name.endswith('''_quantizer''' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(f'''{name:80}: {module}''' )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
logger.info('''Loading calibrated amax''' )
for name, module in model.named_modules():
if name.endswith('''_quantizer''' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('''percentile''' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(lowerCAmelCase_ )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
def fusea(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
for mod in [qq, qk, qv]:
if not hasattr(lowerCAmelCase_ , '''_amax''' ):
print(''' WARNING: NO AMAX BUFFER''' )
return
_snake_case : Tuple = qq._amax.detach().item()
_snake_case : Tuple = qk._amax.detach().item()
_snake_case : List[Any] = qv._amax.detach().item()
_snake_case : List[str] = max(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
qq._amax.fill_(lowerCAmelCase_ )
qk._amax.fill_(lowerCAmelCase_ )
qv._amax.fill_(lowerCAmelCase_ )
logger.info(f''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' )
for name, mod in model.named_modules():
if name.endswith('''.attention.self''' ):
logger.info(f'''FUSE_QKV: {name:{name_width}}''' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
for name, mod in model.named_modules():
if name.endswith('''.output.dense''' ) and not name.endswith('''attention.output.dense''' ):
_snake_case : List[Any] = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=lowerCAmelCase_ )
_snake_case : List[str] = mod._input_quantizer._amax.data.detach().item()
logger.info(f'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' )
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ) and mod._weight_quantizer.axis is not None:
_snake_case : Dict = mod.weight.shape[0]
_snake_case : Optional[int] = mod._weight_quantizer._amax.detach()
_snake_case : Optional[int] = torch.ones(lowerCAmelCase_ , dtype=amax.dtype , device=amax.device ) * amax
print(f'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' )
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ):
            if not hasattr(mod._weight_quantizer , '''_amax''' ):
                print(f'''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''' )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
_snake_case : int = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
_snake_case : Dict = set(range(len(mod.weight.size() ) ) ) - axis_set
_snake_case : Optional[int] = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowerCAmelCase_ , keepdims=lowerCAmelCase_ ).detach()
logger.info(f'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' )
_snake_case : Tuple = amax
def _a ( lowerCAmelCase_ , lowerCAmelCase_=25 , lowerCAmelCase_=180 , lowerCAmelCase_=None ):
"""simple docstring"""
if ignore is None:
_snake_case : Dict = []
elif not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case : Optional[int] = [ignore]
_snake_case : str = 0
for name, mod in model.named_modules():
if not hasattr(lowerCAmelCase_ , '''weight''' ):
continue
_snake_case : Optional[int] = max(lowerCAmelCase_ , len(lowerCAmelCase_ ) )
for name, mod in model.named_modules():
_snake_case : Optional[Any] = getattr(lowerCAmelCase_ , '''_input_quantizer''' , lowerCAmelCase_ )
_snake_case : Tuple = getattr(lowerCAmelCase_ , '''_weight_quantizer''' , lowerCAmelCase_ )
if not hasattr(lowerCAmelCase_ , '''weight''' ):
continue
if type(lowerCAmelCase_ ) in ignore:
continue
if [True for s in ignore if type(lowerCAmelCase_ ) is str and s in name]:
continue
_snake_case : Optional[int] = f'''Act:{input_q.extra_repr()}'''
_snake_case : Any = f'''Wgt:{weight_q.extra_repr()}'''
_snake_case : Optional[int] = f'''{name:{name_width}} {act_str} {wgt_str}'''
if len(lowerCAmelCase_ ) <= line_width:
logger.info(lowerCAmelCase_ )
else:
logger.info(f'''{name:{name_width}} {act_str}''' )
logger.info(f'''{" ":{name_width}} {wgt_str}''' )
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : str = 0
for name, mod in model.named_modules():
if isinstance(lowerCAmelCase_ , pytorch_quantization.nn.TensorQuantizer ):
print(f'''{name:80} {mod}''' )
count += 1
print(f'''{count} TensorQuantizers found in model''' )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Optional[Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if quantizer_mod is not None:
assert hasattr(lowerCAmelCase_ , lowerCAmelCase_ )
setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
logger.warning(f'''{name} has no {quantizer}''' )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="both" , **lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Optional[Any] = f'''Warning: changing {which} quantizers of {name:{qname_width}}'''
for k, v in kwargs.items():
s += f''' {k}={v}'''
if which in ["input", "both"]:
set_quantizer(lowerCAmelCase_ , lowerCAmelCase_ , '''_input_quantizer''' , lowerCAmelCase_ , lowerCAmelCase_ )
if which in ["weight", "both"]:
set_quantizer(lowerCAmelCase_ , lowerCAmelCase_ , '''_weight_quantizer''' , lowerCAmelCase_ , lowerCAmelCase_ )
logger.info(lowerCAmelCase_ )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ):
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(lowerCAmelCase_ , '''_input_quantizer''' ) or hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ):
for n in names:
if re.search(lowerCAmelCase_ , lowerCAmelCase_ ):
set_quantizers(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
elif name.endswith('''_quantizer''' ):
for n in names:
if re.search(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case : Any = f'''Warning: changing {name:{name_width}}'''
for k, v in kwargs.items():
s += f''' {k}={v}'''
setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
logger.info(lowerCAmelCase_ )
| 47
| 0
|
'''simple docstring'''
class lowerCamelCase :
def __init__( self , lowercase__ ) -> None:
"""simple docstring"""
_snake_case : List[str] = size
_snake_case : Optional[int] = [0] * size
_snake_case : Optional[int] = [0] * size
@staticmethod
def UpperCAmelCase_ ( lowercase__ ) -> int:
"""simple docstring"""
return index | (index + 1)
@staticmethod
def UpperCAmelCase_ ( lowercase__ ) -> int:
"""simple docstring"""
return (index & (index + 1)) - 1
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> None:
"""simple docstring"""
_snake_case : Optional[Any] = value
while index < self.size:
_snake_case : Optional[int] = self.get_prev(lowercase__ ) + 1
if current_left_border == index:
_snake_case : Optional[int] = value
else:
_snake_case : Dict = max(lowercase__ , lowercase__ , lowercase__ )
_snake_case : Tuple = self.get_next(lowercase__ )
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> int:
"""simple docstring"""
        right -= 1 # because the right bound is exclusive
_snake_case : Dict = 0
while left <= right:
_snake_case : Tuple = self.get_prev(lowercase__ )
if left <= current_left:
_snake_case : Union[str, Any] = max(lowercase__ , self.tree[right] )
_snake_case : Optional[int] = current_left
else:
_snake_case : str = max(lowercase__ , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 706
|
'''simple docstring'''
from __future__ import annotations
def _a ( lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ):
"""simple docstring"""
if start is None:
_snake_case : Optional[Any] = 0
if end is None:
_snake_case : Any = len(lowerCAmelCase_ ) - 1
if start >= end:
return
_snake_case : Optional[Any] = (start + end) // 2
slowsort(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
slowsort(lowerCAmelCase_ , mid + 1 , lowerCAmelCase_ )
if sequence[end] < sequence[mid]:
_snake_case , _snake_case : int = sequence[mid], sequence[end]
slowsort(lowerCAmelCase_ , lowerCAmelCase_ , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
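# --- Added sketch (not in the original snippet): slowsort sorts in place, and
# its recurrence T(n) = 2T(n/2) + T(n-1) + O(1) is deliberately
# non-polynomial, so keep demo inputs tiny. Parameter names here are explicit
# stand-ins for the obfuscated ones above.
def _slowsort_demo() -> None:
    def slowsort_ref(seq: list, start: int, end: int) -> None:
        if start >= end:
            return
        mid = (start + end) // 2
        slowsort_ref(seq, start, mid)       # sort first half
        slowsort_ref(seq, mid + 1, end)     # sort second half
        if seq[end] < seq[mid]:
            seq[end], seq[mid] = seq[mid], seq[end]  # move the maximum to the end
        slowsort_ref(seq, start, end - 1)   # recurse on everything but the max

    data = [3, 1, 4, 1, 5, 9, 2, 6]
    slowsort_ref(data, 0, len(data) - 1)
    assert data == [1, 1, 2, 3, 4, 5, 6, 9]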
| 47
| 0
|
'''simple docstring'''
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(lowerCAmelCase_ , int(b / 2 ) ) * actual_power(lowerCAmelCase_ , int(b / 2 ) )
else:
return a * actual_power(lowerCAmelCase_ , int(b / 2 ) ) * actual_power(lowerCAmelCase_ , int(b / 2 ) )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
if b < 0:
return 1 / actual_power(lowerCAmelCase_ , lowerCAmelCase_ )
return actual_power(lowerCAmelCase_ , lowerCAmelCase_ )
if __name__ == "__main__":
print(power(-2, -3))
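    # Expected output: -0.125, since power(-2, -3) == 1 / actual_power(-2, 3)
    # == 1 / ((-2) ** 3). (Added note; `power`/`actual_power` are the intended
    # names of the obfuscated `_a` helpers above.)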
| 707
|
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase (unittest.TestCase ):
@slow
def UpperCAmelCase_ ( self ) -> int:
"""simple docstring"""
        _snake_case : Tuple = FlaxMT5ForConditionalGeneration.from_pretrained('''google/mt5-small''' )
_snake_case : Any = AutoTokenizer.from_pretrained('''google/mt5-small''' )
_snake_case : List[str] = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
_snake_case : Dict = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
_snake_case : Any = shift_tokens_right(lowercase__ , model.config.pad_token_id , model.config.decoder_start_token_id )
_snake_case : Any = model(lowercase__ , decoder_input_ids=lowercase__ ).logits
_snake_case : Tuple = optax.softmax_cross_entropy(lowercase__ , onehot(lowercase__ , logits.shape[-1] ) ).mean()
_snake_case : Tuple = -(labels.shape[-1] * loss.item())
_snake_case : Union[str, Any] = -84.9_127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 47
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase : List[Any] = {
'configuration_nllb_moe': [
'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
'NllbMoeConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : int = [
'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
'NllbMoeForConditionalGeneration',
'NllbMoeModel',
'NllbMoePreTrainedModel',
'NllbMoeTop2Router',
'NllbMoeSparseMLP',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
UpperCAmelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 708
|
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCamelCase (unittest.TestCase ):
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_snake_case : Any = torch.nn.Linear(10 , 10 )
_snake_case : Optional[int] = torch.optim.SGD(model.parameters() , 0.1 )
_snake_case : List[str] = Accelerator()
_snake_case : Optional[Any] = accelerator.prepare(lowercase__ )
try:
pickle.loads(pickle.dumps(lowercase__ ) )
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
| 47
| 0
|
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
UpperCAmelCase : str = logging.getLogger(__name__)
UpperCAmelCase : Dict = 5_0 # max width of layer names
UpperCAmelCase : Union[str, Any] = 7_0 # max width of quantizer names
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Dict = parser.add_argument_group('''quant_trainer arguments''' )
group.add_argument('''--wprec''' , type=lowerCAmelCase_ , default=8 , help='''weight precision''' )
group.add_argument('''--aprec''' , type=lowerCAmelCase_ , default=8 , help='''activation precision''' )
group.add_argument('''--quant-per-tensor''' , action='''store_true''' , help='''per tensor weight scaling''' )
group.add_argument('''--quant-disable''' , action='''store_true''' , help='''disable all quantizers''' )
group.add_argument('''--quant-disable-embeddings''' , action='''store_true''' , help='''disable all embeddings quantizers''' )
group.add_argument('''--quant-disable-keyword''' , type=lowerCAmelCase_ , nargs='''+''' , help='''disable quantizers by keyword''' )
group.add_argument('''--quant-disable-layer-module''' , type=lowerCAmelCase_ , help='''disable quantizers by keyword under layer.''' )
group.add_argument('''--quant-enable-layer-module''' , type=lowerCAmelCase_ , help='''enable quantizers by keyword under layer''' )
group.add_argument('''--calibrator''' , default='''max''' , help='''which quantization range calibrator to use''' )
group.add_argument('''--percentile''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , help='''percentile for PercentileCalibrator''' )
group.add_argument('''--fuse-qkv''' , action='''store_true''' , help='''use the same scale factor for qkv''' )
group.add_argument('''--clip-gelu''' , metavar='''N''' , type=lowerCAmelCase_ , help='''clip gelu output maximum value to N''' )
group.add_argument(
'''--recalibrate-weights''' , action='''store_true''' , help=(
'''recalibrate weight amaxes by taking the max of the weights.'''
''' amaxes will be computed with the current quantization granularity (axis).'''
) , )
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
if args.calibrator == "max":
_snake_case : Optional[int] = '''max'''
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError('''Specify --percentile when using percentile calibrator''' )
_snake_case : Tuple = '''histogram'''
elif args.calibrator == "mse":
_snake_case : int = '''histogram'''
else:
raise ValueError(f'''Invalid calibrator {args.calibrator}''' )
_snake_case : Tuple = QuantDescriptor(num_bits=args.aprec , calib_method=lowerCAmelCase_ )
_snake_case : str = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(lowerCAmelCase_ )
quant_nn.QuantLinear.set_default_quant_desc_weight(lowerCAmelCase_ )
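# --- Added usage sketch (not in the original module). With pytorch-quantization
# installed, the argparse defaults above ('max' calibrator, 8-bit, per-channel
# weight scales) amount to roughly:
#
#     from pytorch_quantization import quant_nn
#     from pytorch_quantization.tensor_quant import QuantDescriptor
#
#     input_desc = QuantDescriptor(num_bits=8, calib_method='max')
#     weight_desc = QuantDescriptor(num_bits=8, axis=(0,))  # one scale per output channel
#     quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
#     quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)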
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=False ):
"""simple docstring"""
logger.info('''Configuring Model for Quantization''' )
logger.info(f'''using quantization package {pytorch_quantization.__file__}''' )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(lowerCAmelCase_ , ['''embeddings'''] , which='''weight''' , _disabled=lowerCAmelCase_ )
if args.quant_disable:
set_quantizer_by_name(lowerCAmelCase_ , [''''''] , _disabled=lowerCAmelCase_ )
if args.quant_disable_keyword:
set_quantizer_by_name(lowerCAmelCase_ , args.quant_disable_keyword , _disabled=lowerCAmelCase_ )
if args.quant_disable_layer_module:
set_quantizer_by_name(lowerCAmelCase_ , [R'''layer.\d+.''' + args.quant_disable_layer_module] , _disabled=lowerCAmelCase_ )
if args.quant_enable_layer_module:
set_quantizer_by_name(lowerCAmelCase_ , [R'''layer.\d+.''' + args.quant_enable_layer_module] , _disabled=lowerCAmelCase_ )
if args.recalibrate_weights:
recalibrate_weights(lowerCAmelCase_ )
if args.fuse_qkv:
fuse_qkv(lowerCAmelCase_ , lowerCAmelCase_ )
if args.clip_gelu:
clip_gelu(lowerCAmelCase_ , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(lowerCAmelCase_ )
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
logger.info('''Enabling Calibration''' )
for name, module in model.named_modules():
if name.endswith('''_quantizer''' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(f'''{name:80}: {module}''' )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
logger.info('''Loading calibrated amax''' )
for name, module in model.named_modules():
if name.endswith('''_quantizer''' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('''percentile''' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(lowerCAmelCase_ )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
def fusea(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
for mod in [qq, qk, qv]:
if not hasattr(lowerCAmelCase_ , '''_amax''' ):
print(''' WARNING: NO AMAX BUFFER''' )
return
_snake_case : Tuple = qq._amax.detach().item()
_snake_case : Tuple = qk._amax.detach().item()
_snake_case : List[Any] = qv._amax.detach().item()
_snake_case : List[str] = max(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
qq._amax.fill_(lowerCAmelCase_ )
qk._amax.fill_(lowerCAmelCase_ )
qv._amax.fill_(lowerCAmelCase_ )
logger.info(f''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' )
for name, mod in model.named_modules():
if name.endswith('''.attention.self''' ):
logger.info(f'''FUSE_QKV: {name:{name_width}}''' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
for name, mod in model.named_modules():
if name.endswith('''.output.dense''' ) and not name.endswith('''attention.output.dense''' ):
_snake_case : List[Any] = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=lowerCAmelCase_ )
_snake_case : List[str] = mod._input_quantizer._amax.data.detach().item()
logger.info(f'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' )
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ) and mod._weight_quantizer.axis is not None:
_snake_case : Dict = mod.weight.shape[0]
_snake_case : Optional[int] = mod._weight_quantizer._amax.detach()
_snake_case : Optional[int] = torch.ones(lowerCAmelCase_ , dtype=amax.dtype , device=amax.device ) * amax
print(f'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' )
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ):
            if not hasattr(mod._weight_quantizer , '''_amax''' ):
                print(f'''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''' )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
_snake_case : int = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
_snake_case : Dict = set(range(len(mod.weight.size() ) ) ) - axis_set
_snake_case : Optional[int] = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowerCAmelCase_ , keepdims=lowerCAmelCase_ ).detach()
logger.info(f'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' )
_snake_case : Tuple = amax
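# --- Added sketch (not part of the original module): the reduction above is
# equivalent to taking abs().amax over every axis except the quantization axis.
def _recalib_amax_example():
    import torch

    w = torch.randn(4, 16)  # e.g. a Linear weight, quantized per output channel (axis 0)
    amax = w.abs().amax(dim=1, keepdim=True)  # shape (4, 1): one amax per row
    return amax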
def _a ( lowerCAmelCase_ , lowerCAmelCase_=25 , lowerCAmelCase_=180 , lowerCAmelCase_=None ):
"""simple docstring"""
if ignore is None:
_snake_case : Dict = []
elif not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case : Optional[int] = [ignore]
_snake_case : str = 0
for name, mod in model.named_modules():
if not hasattr(lowerCAmelCase_ , '''weight''' ):
continue
_snake_case : Optional[int] = max(lowerCAmelCase_ , len(lowerCAmelCase_ ) )
for name, mod in model.named_modules():
_snake_case : Optional[Any] = getattr(lowerCAmelCase_ , '''_input_quantizer''' , lowerCAmelCase_ )
_snake_case : Tuple = getattr(lowerCAmelCase_ , '''_weight_quantizer''' , lowerCAmelCase_ )
if not hasattr(lowerCAmelCase_ , '''weight''' ):
continue
if type(lowerCAmelCase_ ) in ignore:
continue
if [True for s in ignore if type(lowerCAmelCase_ ) is str and s in name]:
continue
_snake_case : Optional[int] = f'''Act:{input_q.extra_repr()}'''
_snake_case : Any = f'''Wgt:{weight_q.extra_repr()}'''
_snake_case : Optional[int] = f'''{name:{name_width}} {act_str} {wgt_str}'''
if len(lowerCAmelCase_ ) <= line_width:
logger.info(lowerCAmelCase_ )
else:
logger.info(f'''{name:{name_width}} {act_str}''' )
logger.info(f'''{" ":{name_width}} {wgt_str}''' )
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : str = 0
for name, mod in model.named_modules():
if isinstance(lowerCAmelCase_ , pytorch_quantization.nn.TensorQuantizer ):
print(f'''{name:80} {mod}''' )
count += 1
print(f'''{count} TensorQuantizers found in model''' )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Optional[Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if quantizer_mod is not None:
assert hasattr(lowerCAmelCase_ , lowerCAmelCase_ )
setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
logger.warning(f'''{name} has no {quantizer}''' )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="both" , **lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Optional[Any] = f'''Warning: changing {which} quantizers of {name:{qname_width}}'''
for k, v in kwargs.items():
s += f''' {k}={v}'''
if which in ["input", "both"]:
set_quantizer(lowerCAmelCase_ , lowerCAmelCase_ , '''_input_quantizer''' , lowerCAmelCase_ , lowerCAmelCase_ )
if which in ["weight", "both"]:
set_quantizer(lowerCAmelCase_ , lowerCAmelCase_ , '''_weight_quantizer''' , lowerCAmelCase_ , lowerCAmelCase_ )
logger.info(lowerCAmelCase_ )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ):
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(lowerCAmelCase_ , '''_input_quantizer''' ) or hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ):
for n in names:
if re.search(lowerCAmelCase_ , lowerCAmelCase_ ):
set_quantizers(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
elif name.endswith('''_quantizer''' ):
for n in names:
if re.search(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case : Any = f'''Warning: changing {name:{name_width}}'''
for k, v in kwargs.items():
s += f''' {k}={v}'''
setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
logger.info(lowerCAmelCase_ )
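# --- Added usage sketch (not in the original module): `set_quantizer_by_name`
# matches module names with `re.search`, so the keyword flags above translate
# into calls such as (patterns illustrative):
#
#     set_quantizer_by_name(model, ['embeddings'], which='weight', _disabled=True)
#     set_quantizer_by_name(model, [r'layer\.\d+\.attention'], _disabled=True)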
| 709
|
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = tuple[float, float, float]
UpperCAmelCase : int = tuple[float, float, float]
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : str = end_pointa[0] - end_pointa[0]
_snake_case : Tuple = end_pointa[1] - end_pointa[1]
_snake_case : Any = end_pointa[2] - end_pointa[2]
return (x, y, z)
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Dict = ab[1] * ac[2] - ab[2] * ac[1] # *i
_snake_case : List[str] = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
_snake_case : Optional[int] = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
    return tuple(round(x , lowerCAmelCase_ ) for x in vector ) == (0, 0, 0)
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 10 ):
"""simple docstring"""
_snake_case : str = create_vector(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case : Tuple = create_vector(lowerCAmelCase_ , lowerCAmelCase_ )
return is_zero_vector(get_ad_vectors_cross(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ )
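# --- Added worked example (not in the original snippet): three points are
# collinear iff AB x AC is the zero vector. For A=(0,0,0), B=(1,1,1),
# C=(2,2,2): AB=(1,1,1), AC=(2,2,2), and
# AB x AC = (1*2 - 1*2, -(1*2 - 1*2), 1*2 - 1*2) = (0, 0, 0),
# so the check returns True.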
| 47
| 0
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase (metaclass=a__ ):
_lowercase : str = ["""note_seq"""]
def __init__( self , *lowercase__ , **lowercase__ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ['''note_seq'''] )
@classmethod
def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
@classmethod
def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
| 710
|
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
UpperCAmelCase : List[str] = logging.getLogger(__name__)
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
if os.path.exists(lowerCAmelCase_ ):
if os.path.exists(os.path.join(lowerCAmelCase_ , '''config.json''' ) ) and os.path.isfile(
os.path.join(lowerCAmelCase_ , '''config.json''' ) ):
os.remove(os.path.join(lowerCAmelCase_ , '''config.json''' ) )
if os.path.exists(os.path.join(lowerCAmelCase_ , '''pytorch_model.bin''' ) ) and os.path.isfile(
os.path.join(lowerCAmelCase_ , '''pytorch_model.bin''' ) ):
os.remove(os.path.join(lowerCAmelCase_ , '''pytorch_model.bin''' ) )
else:
os.makedirs(lowerCAmelCase_ )
model.save_pretrained(lowerCAmelCase_ )
def _a ( lowerCAmelCase_ , lowerCAmelCase_=False ):
"""simple docstring"""
_snake_case : Optional[Any] = 2
if unlogit:
_snake_case : Any = torch.pow(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case : Union[str, Any] = p * torch.log(lowerCAmelCase_ )
_snake_case : Optional[Any] = 0
return -plogp.sum(dim=-1 )
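# --- Added sanity check (not in the original script): for a uniform attention
# distribution over n tokens the entropy above is ln(n).
def _entropy_example():
    import torch

    p = torch.full((4,), 0.25)             # uniform attention over 4 tokens
    ent = -(p * torch.log(p)).sum(dim=-1)  # tensor(1.3863) ~= ln 4
    return ent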
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
logger.info('''lv, h >\t''' + '''\t'''.join(f'''{x + 1}''' for x in range(len(lowerCAmelCase_ ) ) ) )
for row in range(len(lowerCAmelCase_ ) ):
if tensor.dtype != torch.long:
logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) )
else:
logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:d}''' for x in tensor[row].cpu().data ) )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=None , lowerCAmelCase_=False ):
"""simple docstring"""
_snake_case , _snake_case : Optional[int] = model.config.num_hidden_layers, model.config.num_attention_heads
_snake_case : Tuple = torch.zeros(lowerCAmelCase_ , lowerCAmelCase_ ).to(args.device )
_snake_case : Union[str, Any] = torch.zeros(lowerCAmelCase_ , lowerCAmelCase_ ).to(args.device )
if head_mask is None:
_snake_case : int = torch.ones(lowerCAmelCase_ , lowerCAmelCase_ ).to(args.device )
head_mask.requires_grad_(requires_grad=lowerCAmelCase_ )
    # If attention heads have actually been pruned, set the head mask to None to avoid a shape mismatch
if actually_pruned:
_snake_case : Dict = None
_snake_case : Dict = 0.0
_snake_case : Optional[int] = 0.0
for step, inputs in enumerate(tqdm(lowerCAmelCase_ , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ):
_snake_case : List[Any] = tuple(t.to(args.device ) for t in inputs )
((_snake_case) , ) : Optional[Any] = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
_snake_case : Any = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , head_mask=lowerCAmelCase_ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
_snake_case , _snake_case , _snake_case : List[Any] = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(lowerCAmelCase_ ):
_snake_case : Union[str, Any] = entropy(attn.detach() , lowerCAmelCase_ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(lowerCAmelCase_ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
_snake_case : Any = 2
_snake_case : List[str] = torch.pow(torch.pow(lowerCAmelCase_ , lowerCAmelCase_ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
if not args.dont_normalize_global_importance:
_snake_case : Optional[int] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('''Attention entropies''' )
print_ad_tensor(lowerCAmelCase_ )
if compute_importance:
logger.info('''Head importance scores''' )
print_ad_tensor(lowerCAmelCase_ )
logger.info('''Head ranked by importance scores''' )
_snake_case : str = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
_snake_case : List[Any] = torch.arange(
head_importance.numel() , device=args.device )
_snake_case : List[Any] = head_ranks.view_as(lowerCAmelCase_ )
print_ad_tensor(lowerCAmelCase_ )
return attn_entropy, head_importance, total_loss
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case , _snake_case , _snake_case : str = compute_heads_importance(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ )
    _snake_case : Optional[Any] = 1 / loss # instead of downstream score use the LM loss
logger.info('''Pruning: original score: %f, threshold: %f''' , lowerCAmelCase_ , original_score * args.masking_threshold )
_snake_case : int = torch.ones_like(lowerCAmelCase_ )
_snake_case : Optional[Any] = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
_snake_case : int = original_score
while current_score >= original_score * args.masking_threshold:
_snake_case : int = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
_snake_case : Dict = float('''Inf''' )
_snake_case : Optional[Any] = head_importance.view(-1 ).sort()[1]
if len(lowerCAmelCase_ ) <= num_to_mask:
print('''BREAK BY num_to_mask''' )
break
# mask heads
_snake_case : Union[str, Any] = current_heads_to_mask[:num_to_mask]
logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) )
_snake_case : Tuple = new_head_mask.view(-1 )
_snake_case : List[str] = 0.0
_snake_case : str = new_head_mask.view_as(lowerCAmelCase_ )
_snake_case : Dict = new_head_mask.clone().detach()
print_ad_tensor(lowerCAmelCase_ )
# Compute metric and head importance again
_snake_case , _snake_case , _snake_case : Any = compute_heads_importance(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ , head_mask=lowerCAmelCase_ )
_snake_case : int = 1 / loss
logger.info(
'''Masking: current score: %f, remaining heads %d (%.1f percents)''' , lowerCAmelCase_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info('''Final head mask''' )
print_ad_tensor(lowerCAmelCase_ )
np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() )
return head_mask
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Optional[Any] = datetime.now()
_snake_case , _snake_case , _snake_case : Union[str, Any] = compute_heads_importance(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ , compute_importance=lowerCAmelCase_ , head_mask=lowerCAmelCase_ )
_snake_case : Tuple = 1 / loss
_snake_case : Dict = datetime.now() - before_time
_snake_case : List[Any] = sum(p.numel() for p in model.parameters() )
_snake_case : int = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(lowerCAmelCase_ ) )
}
for k, v in heads_to_prune.items():
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case : Union[str, Any] = [
v,
]
assert sum(len(lowerCAmelCase_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(lowerCAmelCase_ )
_snake_case : List[str] = sum(p.numel() for p in model.parameters() )
_snake_case : int = datetime.now()
_snake_case , _snake_case , _snake_case : Optional[Any] = compute_heads_importance(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ , compute_importance=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , actually_pruned=lowerCAmelCase_ , )
_snake_case : Optional[int] = 1 / loss
_snake_case : Dict = datetime.now() - before_time
logger.info(
'''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , lowerCAmelCase_ , lowerCAmelCase_ , pruned_num_params / original_num_params * 100 , )
logger.info('''Pruning: score with masking: %f score with pruning: %f''' , lowerCAmelCase_ , lowerCAmelCase_ )
logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100 )
save_model(lowerCAmelCase_ , args.output_dir )
def _a ( ):
"""simple docstring"""
_snake_case : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--data_dir''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--output_dir''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''The output directory where the model predictions and checkpoints will be written.''' , )
# Other parameters
parser.add_argument(
'''--config_name''' , default='''''' , type=lowerCAmelCase_ , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--tokenizer_name''' , default='''''' , type=lowerCAmelCase_ , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--cache_dir''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
parser.add_argument(
'''--data_subset''' , type=lowerCAmelCase_ , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' )
parser.add_argument(
'''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
parser.add_argument(
'''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' )
parser.add_argument(
'''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
parser.add_argument(
'''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' )
parser.add_argument(
'''--masking_threshold''' , default=0.9 , type=lowerCAmelCase_ , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , )
parser.add_argument(
'''--masking_amount''' , default=0.1 , type=lowerCAmelCase_ , help='''Amount to heads to masking at each masking step.''' )
parser.add_argument('''--metric_name''' , default='''acc''' , type=lowerCAmelCase_ , help='''Metric to use for head masking.''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=lowerCAmelCase_ , help=(
'''The maximum total input sequence length after WordPiece tokenization. \n'''
'''Sequences longer than this will be truncated, sequences shorter padded.'''
) , )
parser.add_argument('''--batch_size''' , default=1 , type=lowerCAmelCase_ , help='''Batch size.''' )
parser.add_argument('''--seed''' , type=lowerCAmelCase_ , default=42 )
parser.add_argument('''--local_rank''' , type=lowerCAmelCase_ , default=-1 , help='''local_rank for distributed training on gpus''' )
parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' )
parser.add_argument('''--server_ip''' , type=lowerCAmelCase_ , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=lowerCAmelCase_ , default='''''' , help='''Can be used for distant debugging.''' )
_snake_case : Optional[Any] = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCAmelCase_ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
_snake_case : str = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
_snake_case : Optional[Any] = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
_snake_case : List[str] = torch.device('''cuda''' , args.local_rank )
_snake_case : int = 1
torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    _snake_case : Optional[Any] = GPT2LMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
_snake_case : Optional[int] = nn.parallel.DistributedDataParallel(
lowerCAmelCase_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=lowerCAmelCase_ )
elif args.n_gpu > 1:
_snake_case : List[Any] = nn.DataParallel(lowerCAmelCase_ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=lowerCAmelCase_ )
torch.save(lowerCAmelCase_ , os.path.join(args.output_dir , '''run_args.bin''' ) )
logger.info('''Training/evaluation parameters %s''' , lowerCAmelCase_ )
# Prepare dataset
_snake_case : Dict = np.concatenate(
[
            np.loadtxt(args.data_dir , dtype=np.int64 ),
] )
_snake_case : int = (torch.from_numpy(lowerCAmelCase_ ),)
_snake_case : Tuple = TensorDataset(*lowerCAmelCase_ )
_snake_case : List[str] = RandomSampler(lowerCAmelCase_ )
_snake_case : Dict = DataLoader(lowerCAmelCase_ , sampler=lowerCAmelCase_ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
_snake_case : Optional[int] = mask_heads(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
prune_heads(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if __name__ == "__main__":
main()
| 47
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase : List[Any] = {
'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
'processing_mgp_str': ['MgpstrProcessor'],
'tokenization_mgp_str': ['MgpstrTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : str = [
'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
'MgpstrModel',
'MgpstrPreTrainedModel',
'MgpstrForSceneTextRecognition',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
UpperCAmelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 711
|
'''simple docstring'''
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
    if n == 1 or not isinstance(n , int ):
return 0
elif n == 2:
return 1
else:
_snake_case : Union[str, Any] = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Optional[int] = 0
_snake_case : int = 2
while digits < n:
index += 1
_snake_case : Tuple = len(str(fibonacci(lowerCAmelCase_ ) ) )
return index
def _a ( lowerCAmelCase_ = 1_000 ):
"""simple docstring"""
return fibonacci_digits_index(lowerCAmelCase_ )
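# --- Added note (not in the original snippet): Project Euler 25 asks for the
# index of the first 1000-digit Fibonacci number; the expected answer is 4782.
# A closed-form estimate via Binet's formula, digits(F_n) ~ n*log10(phi)
# - log10(sqrt(5)) + 1, gives the same index without iterating. The helper
# name below is illustrative:
def _first_fib_index_with_n_digits(n: int) -> int:
    from math import ceil, log10, sqrt

    phi = (1 + sqrt(5)) / 2
    return ceil((n - 1 + log10(sqrt(5))) / log10(phi))  # 4782 for n=1000, 12 for n=3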
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 47
| 0
|
'''simple docstring'''
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : int = [False] * len(lowerCAmelCase_ )
_snake_case : Tuple = []
queue.append(lowerCAmelCase_ )
_snake_case : Any = True
while queue:
_snake_case : Union[str, Any] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(lowerCAmelCase_ )
_snake_case : Optional[Any] = True
_snake_case : List[str] = u
return visited[t]
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Optional[Any] = [-1] * (len(lowerCAmelCase_ ))
_snake_case : List[str] = 0
while bfs(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case : Optional[Any] = float('''Inf''' )
_snake_case : List[str] = sink
while s != source:
# Find the minimum value in select path
_snake_case : Optional[int] = min(lowerCAmelCase_ , graph[parent[s]][s] )
_snake_case : Union[str, Any] = parent[s]
max_flow += path_flow
_snake_case : Optional[int] = sink
while v != source:
_snake_case : Optional[Any] = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
_snake_case : List[Any] = parent[v]
return max_flow
UpperCAmelCase : str = [
[0, 1_6, 1_3, 0, 0, 0],
[0, 0, 1_0, 1_2, 0, 0],
[0, 4, 0, 0, 1_4, 0],
[0, 0, 9, 0, 0, 2_0],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
UpperCAmelCase : Union[str, Any] = 0, 5
print(ford_fulkerson(graph, source, sink))
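# --- Added note (not in the original snippet): this is the classic 6-node
# network from CLRS; the maximum flow from vertex 0 to vertex 5 is 23
# (e.g. augmenting paths 0-1-3-5 (12), 0-2-4-5 (4) and 0-2-4-3-5 (7)).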
| 712
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
UpperCAmelCase : Any = TypeVar('T')
UpperCAmelCase : str = TypeVar('U')
class lowerCamelCase (Generic[T, U] ):
def __init__( self , lowercase__ , lowercase__ ) -> List[Any]:
"""simple docstring"""
_snake_case : str = key
_snake_case : Optional[int] = val
_snake_case : DoubleLinkedListNode[T, U] | None = None
_snake_case : DoubleLinkedListNode[T, U] | None = None
def __repr__( self ) -> str:
"""simple docstring"""
return (
F'''Node: key: {self.key}, val: {self.val}, '''
F'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
)
class lowerCamelCase (Generic[T, U] ):
def __init__( self ) -> None:
"""simple docstring"""
_snake_case : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowercase__ , lowercase__ )
_snake_case : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowercase__ , lowercase__ )
_snake_case , _snake_case : Union[str, Any] = self.rear, self.head
def __repr__( self ) -> str:
"""simple docstring"""
_snake_case : List[Any] = ['''DoubleLinkedList''']
_snake_case : str = self.head
while node.next is not None:
rep.append(str(lowercase__ ) )
_snake_case : List[str] = node.next
rep.append(str(self.rear ) )
return ",\n ".join(lowercase__ )
def UpperCAmelCase_ ( self , lowercase__ ) -> None:
"""simple docstring"""
_snake_case : Tuple = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
_snake_case : Union[str, Any] = node
_snake_case : Optional[Any] = previous
_snake_case : int = node
_snake_case : Union[str, Any] = self.rear
def UpperCAmelCase_ ( self , lowercase__ ) -> DoubleLinkedListNode[T, U] | None:
"""simple docstring"""
if node.prev is None or node.next is None:
return None
_snake_case : Optional[int] = node.next
_snake_case : Any = node.prev
_snake_case : List[str] = None
_snake_case : Optional[int] = None
return node
class lowerCamelCase (Generic[T, U] ):
_lowercase : dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__( self , lowercase__ ) -> Union[str, Any]:
"""simple docstring"""
_snake_case : DoubleLinkedList[T, U] = DoubleLinkedList()
_snake_case : Union[str, Any] = capacity
_snake_case : int = 0
_snake_case : Dict = 0
_snake_case : Union[str, Any] = 0
_snake_case : dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self ) -> str:
"""simple docstring"""
return (
F'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
F'''capacity={self.capacity}, current size={self.num_keys})'''
)
def __contains__( self , lowercase__ ) -> bool:
"""simple docstring"""
return key in self.cache
def UpperCAmelCase_ ( self , lowercase__ ) -> U | None:
"""simple docstring"""
if key in self.cache:
self.hits += 1
_snake_case : DoubleLinkedListNode[T, U] = self.cache[key]
_snake_case : Tuple = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(lowercase__ )
return node.val
self.miss += 1
return None
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> None:
"""simple docstring"""
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
_snake_case : Dict = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(lowercase__ ) is not None
            ) # node guaranteed to be in list; node.key asserted non-None above
del self.cache[first_node.key]
self.num_keys -= 1
_snake_case : Optional[int] = DoubleLinkedListNode(lowercase__ , lowercase__ )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
_snake_case : Optional[Any] = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
_snake_case : Optional[Any] = value
self.list.add(lowercase__ )
@classmethod
def UpperCAmelCase_ ( cls , lowercase__ = 128 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
"""simple docstring"""
def cache_decorator_inner(lowercase__ ) -> Callable[..., U]:
def cache_decorator_wrapper(*lowercase__ ) -> U:
if func not in cls.decorator_function_to_instance_map:
_snake_case : Optional[Any] = LRUCache(lowercase__ )
_snake_case : Union[str, Any] = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
_snake_case : Tuple = func(*lowercase__ )
cls.decorator_function_to_instance_map[func].put(args[0] , lowercase__ )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(lowercase__ , '''cache_info''' , lowercase__ ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
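# --- Added comparison sketch (not in the original snippet): the decorator
# classmethod above mirrors the standard library's functools.lru_cache for
# single-argument functions.
def _stdlib_lru_example():
    from functools import lru_cache

    @lru_cache(maxsize=128)
    def fib(n: int) -> int:
        return n if n < 2 else fib(n - 1) + fib(n - 2)

    fib(32)
    return fib.cache_info()  # CacheInfo(hits=30, misses=33, maxsize=128, currsize=33)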
| 47
| 0
|
'''simple docstring'''
from math import pow, sqrt
def _a ( *lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Optional[int] = len(lowerCAmelCase_ ) > 0 and all(value > 0.0 for value in values )
return result
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(lowerCAmelCase_ , lowerCAmelCase_ )
        else ValueError('''Input Error: Molar mass values must be greater than 0.''' )
)
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else ValueError(
            '''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
)
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else ValueError(
            '''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
)
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else ValueError(
            '''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
)
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else ValueError(
            '''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
)
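# --- Added worked example (not in the original snippet): by Graham's law the
# effusion-rate ratio of hydrogen to oxygen is sqrt(M_O2 / M_H2).
def _graham_example() -> float:
    from math import sqrt

    m_h2, m_o2 = 2.016, 31.998  # molar masses in g/mol
    return round(sqrt(m_o2 / m_h2), 6)  # ~3.984: H2 effuses roughly 4x faster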
| 713
|
'''simple docstring'''
import os
import numpy
import onnx
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : List[Any] = a.name
_snake_case : List[Any] = b.name
_snake_case : Tuple = ''''''
_snake_case : Tuple = ''''''
_snake_case : Optional[Any] = a == b
_snake_case : List[Any] = name_a
_snake_case : str = name_b
return res
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(lowerCAmelCase_ , lowerCAmelCase_ )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , lowerCAmelCase_ , lowerCAmelCase_ )
_graph_replace_input_with(node_proto.attribute[1].g , lowerCAmelCase_ , lowerCAmelCase_ )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , lowerCAmelCase_ , lowerCAmelCase_ )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
for n in graph_proto.node:
_node_replace_input_with(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Optional[Any] = list(model.graph.initializer )
_snake_case : List[str] = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
_snake_case : List[Any] = inits[i].name
_snake_case : List[str] = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , lowerCAmelCase_ , lowerCAmelCase_ )
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Tuple = os.path.dirname(lowerCAmelCase_ )
_snake_case : str = os.path.basename(lowerCAmelCase_ )
_snake_case : Tuple = onnx.load(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) )
_snake_case : Union[str, Any] = list(model.graph.initializer )
_snake_case : Union[str, Any] = set()
_snake_case : Any = {}
_snake_case : str = []
_snake_case : Union[str, Any] = 0
for i in range(len(lowerCAmelCase_ ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(lowerCAmelCase_ ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(lowerCAmelCase_ )
dup_set.add(lowerCAmelCase_ )
_snake_case : List[Any] = inits[j].data_type
_snake_case : Dict = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('''unexpected data type: ''' , lowerCAmelCase_ )
total_reduced_size += mem_size
_snake_case : Union[str, Any] = inits[i].name
_snake_case : Any = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(lowerCAmelCase_ )
else:
_snake_case : Union[str, Any] = [name_j]
ind_to_replace.append((j, i) )
print('''total reduced size: ''' , total_reduced_size / 1_024 / 1_024 / 1_024 , '''GB''' )
_snake_case : List[str] = sorted(lowerCAmelCase_ )
_remove_dup_initializers_from_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case : List[str] = '''optimized_''' + model_file_name
_snake_case : List[Any] = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
onnx.save(lowerCAmelCase_ , lowerCAmelCase_ )
return new_model
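# --- Added note (not in the original snippet): the dtype branches above follow
# the ONNX TensorProto enum, where 1=FLOAT (4 bytes), 6=INT32 (4 bytes),
# 7=INT64 (8 bytes) and 11=DOUBLE (8 bytes), so `mem_size` estimates the bytes
# reclaimed by dropping each duplicate initializer.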
| 47
| 0
|
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase : Tuple = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase : Tuple = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
UpperCAmelCase : Dict = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
UpperCAmelCase : int = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
UpperCAmelCase : int = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2,
'facebook/dpr-ctx_encoder-multiset-base': 5_1_2,
}
UpperCAmelCase : List[Any] = {
'facebook/dpr-question_encoder-single-nq-base': 5_1_2,
'facebook/dpr-question_encoder-multiset-base': 5_1_2,
}
UpperCAmelCase : Any = {
'facebook/dpr-reader-single-nq-base': 5_1_2,
'facebook/dpr-reader-multiset-base': 5_1_2,
}
UpperCAmelCase : Optional[int] = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
UpperCAmelCase : Any = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
UpperCAmelCase : Any = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class lowerCamelCase (a__ ):
_lowercase : Union[str, Any] = VOCAB_FILES_NAMES
_lowercase : List[str] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_lowercase : Optional[Any] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : Any = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class lowerCamelCase (a__ ):
_lowercase : Any = VOCAB_FILES_NAMES
_lowercase : Dict = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_lowercase : Tuple = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : List[Any] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
CUSTOM_DPR_READER_DOCSTRING = R'\n    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n    with the format:\n\n    ```\n    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n    ```\n\n    Args:\n        questions (`str` or `List[str]`):\n            The questions to be encoded. You can specify one question for many passages. In this case, the question\n            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n            `titles` or `texts`.\n        titles (`str` or `List[str]`):\n            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n        texts (`str` or `List[str]`):\n            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n            Activates and controls padding. Accepts the following values:\n\n            - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n              if provided).\n            - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided.\n            - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n              lengths).\n        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n            Activates and controls truncation. Accepts the following values:\n\n            - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n              the maximum acceptable input length for the model if that argument is not provided. This will truncate\n              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n              of pairs) is provided.\n            - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the first\n              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the\n              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n              greater than the model maximum admissible input size).\n        max_length (`int`, *optional*):\n            Controls the maximum length to use by one of the truncation/padding parameters.\n\n            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n            is required by one of the truncation/padding parameters. If the model has no specific maximum input\n            length (like XLNet) truncation/padding to a maximum length will be deactivated.\n        return_tensors (`str` or [`~utils.TensorType`], *optional*):\n            If set, will return tensors instead of list of python integers. Acceptable values are:\n\n            - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n            - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n            - `\'np\'`: Return Numpy `np.ndarray` objects.\n        return_attention_mask (`bool`, *optional*):\n            Whether or not to return the attention mask. If not set, will return the attention mask according to the\n            specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n            [What are attention masks?](../glossary#attention-mask)\n\n    Returns:\n        `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n        - `input_ids`: List of token ids to be fed to a model.\n        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n    '
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class lowerCamelCase :
    def __call__( self , questions , titles = None , texts = None , padding = False , truncation = False , max_length = None , return_tensors = None , return_attention_mask = None , **kwargs , ) -> BatchEncoding:
        """simple docstring"""
        if titles is None and texts is None:
            return super().__call__(
                questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        titles = titles if not isinstance(titles , str ) else [titles]
        texts = texts if not isinstance(texts , str ) else [texts]
        n_passages = len(titles )
        questions = questions if not isinstance(questions , str ) else [questions] * n_passages
        if len(titles ) != len(texts ):
            raise ValueError(
                f'''There should be as many titles as texts but got {len(titles )} titles and {len(texts )} texts.''' )
        encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False )['''input_ids''']
        encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False )['''input_ids''']
        encoded_inputs = {
            '''input_ids''': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts )
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            encoded_inputs['''attention_mask'''] = attention_mask
        return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors )
    def decode_best_spans( self , reader_input , reader_output , num_spans = 16 , max_answer_length = 64 , num_spans_per_passage = 4 , ) -> List[DPRSpanPrediction]:
        """simple docstring"""
        input_ids = reader_input['''input_ids''']
        start_logits , end_logits , relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits )
        sorted_docs = sorted(range(n_passages ) , reverse=True , key=relevance_logits.__getitem__ )
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id )
            else:
                sequence_len = len(sequence_ids )
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
                if len(nbest_spans_predictions ) >= num_spans:
                    break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self , start_logits , end_logits , max_answer_length , top_spans , ) -> List[DPRSpanPrediction]:
        """simple docstring"""
        scores = []
        for start_index, start_score in enumerate(start_logits ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        scores = sorted(scores , key=lambda x : x[1] , reverse=True )
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f'''Wrong span indices: [{start_index}:{end_index}]''' )
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f'''Span is too long: {length} > {max_answer_length}''' )
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(chosen_span_intervals ) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(a__ )
class lowerCamelCase (a__ , a__ ):
_lowercase : Dict = VOCAB_FILES_NAMES
_lowercase : Optional[int] = READER_PRETRAINED_VOCAB_FILES_MAP
_lowercase : List[Any] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : List[Any] = READER_PRETRAINED_INIT_CONFIGURATION
_lowercase : Union[str, Any] = ["""input_ids""", """attention_mask"""]
| 714
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_pegasus_x'] = [
        'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PegasusXForConditionalGeneration',
        'PegasusXModel',
        'PegasusXPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
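# What the lazy module buys us (illustrative): importing the package stays cheap,
# and the torch-backed symbols above are only imported on first attribute access.
# Under the hood `_LazyModule` relies on module-level `__getattr__` (PEP 562),
# roughly equivalent to this hypothetical simplification:
#
#     import importlib
#     def __getattr__(name):
#         for submodule, names in _import_structure.items():
#             if name in names:
#                 return getattr(importlib.import_module('.' + submodule, __name__), name)
#         raise AttributeError(name)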
| 47
| 0
|
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class lowerCamelCase (a__ ):
    def setUp(self ):
        """simple docstring"""
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = '''pt'''
        self.framework_tf = '''tf'''
    def _setup_pt_ckpt(self , model_path ):
        """simple docstring"""
        model_pt = AutoModel.from_pretrained(self.test_model )
        model_pt.save_pretrained(model_path )
    def _setup_tf_ckpt(self , model_path ):
        """simple docstring"""
        model_tf = TFAutoModel.from_pretrained(self.test_model , from_pt=True )
        model_tf.save_pretrained(model_path )
def UpperCAmelCase_ ( self ) -> List[Any]:
"""simple docstring"""
        mock_framework = '''mock_framework'''
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model , mock_framework )
        self.assertEqual(framework , mock_framework )
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt )
            self.assertEqual(framework , self.framework_pt )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt )
            self.assertEqual(framework , self.framework_tf )
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError ):
                FeaturesManager.determine_framework(local_invalid_ckpt )
def UpperCAmelCase_ ( self ) -> List[str]:
"""simple docstring"""
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False )
        with patch('''transformers.onnx.features.is_tf_available''' , mock_tf_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
        self.assertEqual(framework , self.framework_pt )
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False )
        with patch('''transformers.onnx.features.is_torch_available''' , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
        self.assertEqual(framework , self.framework_tf )
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True )
        mock_torch_available = MagicMock(return_value=True )
        with patch('''transformers.onnx.features.is_tf_available''' , mock_tf_available ), patch(
            '''transformers.onnx.features.is_torch_available''' , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
        self.assertEqual(framework , self.framework_pt )
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False )
        mock_torch_available = MagicMock(return_value=False )
        with patch('''transformers.onnx.features.is_tf_available''' , mock_tf_available ), patch(
            '''transformers.onnx.features.is_torch_available''' , mock_torch_available ):
            with self.assertRaises(EnvironmentError ):
                FeaturesManager.determine_framework(self.test_model )
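# In short (illustrative summary of the behavior exercised above):
# `FeaturesManager.determine_framework` honors an explicit framework argument
# first, then inspects local checkpoint files, and finally falls back to
# whichever framework is importable, preferring PyTorch when both are present:
#
#     FeaturesManager.determine_framework('bert-base-uncased')  # -> 'pt' if torch is installed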
| 715
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase : Dict = logging.get_logger(__name__)
class lowerCamelCase (a__ ):
_lowercase : int = ["""pixel_values"""]
    def __init__( self , do_resize = True , size_divisor = 32 , resample=PILImageResampling.BILINEAR , do_rescale = True , **kwargs , ) -> None:
        """simple docstring"""
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs )
    def resize( self , image , size_divisor , resample , data_format = None , **kwargs ) -> np.ndarray:
        """simple docstring"""
        height , width = get_image_size(image )
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image , (new_h, new_w) , resample=resample , data_format=data_format , **kwargs )
        return image
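    # Worked example of the rounding above: with size_divisor=32, an input of
    # height 518 and width 333 is resized to 512 x 320, since
    # 518 // 32 * 32 == 512 and 333 // 32 * 32 == 320.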
    def rescale( self , image , scale , data_format = None , **kwargs ) -> np.ndarray:
        """simple docstring"""
        return rescale(image=image , scale=scale , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , do_rescale = None , size_divisor=None , resample = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError('''size_divisor is required for resizing''' )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError('''Invalid image(s)''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img ) for img in images]
        if do_resize:
            images = [self.resize(image , size_divisor=size_divisor , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image , scale=1 / 255 ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 47
| 0
|
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num ):
    """simple docstring"""
    if num <= 0:
        raise ValueError('''math domain error''' )
    return quad(integrand , 0 , inf , args=(num,) )[0]


def integrand(x , z ):
    """simple docstring"""
    return math.pow(x , z - 1 ) * math.exp(-x )
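# Worked example: the integral representation reproduces the factorial for
# positive integers, Gamma(n) == (n - 1)!, so gamma(5) evaluates to ~24.0
# (up to quadrature error):
#
#     >>> round(gamma(5), 6)
#     24.0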
if __name__ == "__main__":
from doctest import testmod
testmod()
| 716
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class lowerCamelCase :
_lowercase : Any = LEDConfig
_lowercase : Any = {}
_lowercase : Optional[Any] = """gelu"""
def __init__( self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=False , lowercase__=99 , lowercase__=32 , lowercase__=2 , lowercase__=4 , lowercase__=37 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=20 , lowercase__=2 , lowercase__=1 , lowercase__=0 , lowercase__=4 , ) -> Any:
"""simple docstring"""
_snake_case : Dict = parent
_snake_case : Any = batch_size
_snake_case : List[str] = seq_length
_snake_case : Union[str, Any] = is_training
_snake_case : Tuple = use_labels
_snake_case : int = vocab_size
_snake_case : str = hidden_size
_snake_case : Optional[Any] = num_hidden_layers
_snake_case : List[Any] = num_attention_heads
_snake_case : Optional[int] = intermediate_size
_snake_case : List[Any] = hidden_dropout_prob
_snake_case : List[str] = attention_probs_dropout_prob
_snake_case : Optional[int] = max_position_embeddings
_snake_case : Any = eos_token_id
_snake_case : List[Any] = pad_token_id
_snake_case : Optional[int] = bos_token_id
_snake_case : Any = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_snake_case : Any = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_snake_case : Tuple = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def UpperCAmelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_snake_case : Tuple = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_snake_case : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 )
_snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case : List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_snake_case : Dict = prepare_led_inputs_dict(lowercase__ , lowercase__ , lowercase__ )
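        # Put global attention on the final (EOS) position only; every other
        # position uses LED's local windowed attention.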
_snake_case : Dict = tf.concat(
[tf.zeros_like(lowercase__ )[:, :-1], tf.ones_like(lowercase__ )[:, -1:]] , axis=-1 , )
_snake_case : Dict = global_attention_mask
return config, inputs_dict
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> int:
"""simple docstring"""
_snake_case : int = TFLEDModel(config=lowercase__ ).get_decoder()
_snake_case : Union[str, Any] = inputs_dict['''input_ids''']
_snake_case : List[str] = input_ids[:1, :]
_snake_case : Tuple = inputs_dict['''attention_mask'''][:1, :]
_snake_case : Dict = 1
# first forward pass
_snake_case : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__ )
_snake_case , _snake_case : Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_snake_case : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
_snake_case : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_snake_case : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 )
_snake_case : List[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_snake_case : List[Any] = model(lowercase__ , attention_mask=lowercase__ )[0]
_snake_case : Tuple = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_snake_case : Tuple = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_snake_case : int = output_from_no_past[:, -3:, random_slice_idx]
_snake_case : Optional[Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase__ , lowercase__ , rtol=1E-3 )
def prepare_led_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , ):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.inta )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class lowerCamelCase (a__ , a__ , unittest.TestCase ):
_lowercase : Optional[int] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_lowercase : int = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
_lowercase : Dict = (
{
"""conversational""": TFLEDForConditionalGeneration,
"""feature-extraction""": TFLEDModel,
"""summarization""": TFLEDForConditionalGeneration,
"""text2text-generation""": TFLEDForConditionalGeneration,
"""translation""": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_lowercase : int = True
_lowercase : List[Any] = False
_lowercase : str = False
_lowercase : Union[str, Any] = False
def UpperCAmelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_snake_case : str = TFLEDModelTester(self )
_snake_case : Union[str, Any] = ConfigTester(self , config_class=lowercase__ )
def UpperCAmelCase_ ( self ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ) -> List[str]:
"""simple docstring"""
_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase__ )
def UpperCAmelCase_ ( self ) -> int:
"""simple docstring"""
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Any = tf.zeros_like(inputs_dict['''attention_mask'''] )
_snake_case : Optional[Any] = 2
_snake_case : Any = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
_snake_case : Dict = True
_snake_case : str = self.model_tester.seq_length
_snake_case : Dict = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowercase__ ):
_snake_case : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(lowercase__ ):
_snake_case : int = [t.numpy() for t in outputs.encoder_attentions]
_snake_case : Tuple = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_snake_case : Union[str, Any] = True
_snake_case : Dict = False
_snake_case : Union[str, Any] = False
_snake_case : List[Any] = model_class(lowercase__ )
_snake_case : Optional[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
_snake_case : List[Any] = len(lowercase__ )
self.assertEqual(config.output_hidden_states , lowercase__ )
check_encoder_attentions_output(lowercase__ )
if self.is_encoder_decoder:
_snake_case : Union[str, Any] = model_class(lowercase__ )
_snake_case : List[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
self.assertEqual(config.output_hidden_states , lowercase__ )
check_decoder_attentions_output(lowercase__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_snake_case : str = True
_snake_case : Tuple = model_class(lowercase__ )
_snake_case : int = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
self.assertEqual(config.output_hidden_states , lowercase__ )
check_encoder_attentions_output(lowercase__ )
# Check attention is always last and order is fine
_snake_case : int = True
_snake_case : List[str] = True
_snake_case : Tuple = model_class(lowercase__ )
_snake_case : Optional[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase__ ) )
self.assertEqual(model.config.output_hidden_states , lowercase__ )
check_encoder_attentions_output(lowercase__ )
@unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
def UpperCAmelCase_ ( self ) -> int:
"""simple docstring"""
pass
def UpperCAmelCase_ ( self ) -> str:
"""simple docstring"""
pass
def _long_tensor(tok_lst ):
    """simple docstring"""
    return tf.constant(tok_lst , dtype=tf.intaa )
UpperCAmelCase : Dict = 1E-4
@slow
@require_tf
class lowerCamelCase (unittest.TestCase ):
def UpperCAmelCase_ ( self ) -> Dict:
"""simple docstring"""
_snake_case : List[str] = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
# change to intended input here
_snake_case : List[str] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : Tuple = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : Tuple = prepare_led_inputs_dict(model.config , lowercase__ , lowercase__ )
_snake_case : int = model(**lowercase__ )[0]
_snake_case : Dict = (1, 1_024, 768)
self.assertEqual(output.shape , lowercase__ )
# change to expected output here
_snake_case : List[Any] = tf.convert_to_tensor(
[[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1E-3 )
def UpperCAmelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_snake_case : Any = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
# change to intended input here
_snake_case : Dict = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : Dict = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : List[str] = prepare_led_inputs_dict(model.config , lowercase__ , lowercase__ )
_snake_case : Tuple = model(**lowercase__ )[0]
_snake_case : Any = (1, 1_024, model.config.vocab_size)
self.assertEqual(output.shape , lowercase__ )
# change to expected output here
_snake_case : Dict = tf.convert_to_tensor(
[[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1E-3 , rtol=1E-3 )
| 47
| 0
|
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class lowerCamelCase (a__ ):
_lowercase : Optional[int] = """MCTCTFeatureExtractor"""
_lowercase : str = """AutoTokenizer"""
    def __init__( self , feature_extractor , tokenizer ):
        """simple docstring"""
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__( self , *args , **kwargs ):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
            audio = kwargs.pop('''raw_speech''' )
        else:
            audio = kwargs.pop('''audio''' , None )
        sampling_rate = kwargs.pop('''sampling_rate''' , None )
        text = kwargs.pop('''text''' , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['''labels'''] = encodings['''input_ids''']
            return inputs
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def pad( self , *args , **kwargs ):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor.pad(*args , **kwargs )
        input_features = kwargs.pop('''input_features''' , None )
        labels = kwargs.pop('''labels''' , None )
        if len(args ) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features , *args , **kwargs )
        if labels is not None:
            labels = self.tokenizer.pad(labels , **kwargs )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features['''labels'''] = labels['''input_ids''']
            return input_features
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
@contextmanager
    def as_target_processor( self ):
        """simple docstring"""
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your audio inputs, or in a separate call.''' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
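# Usage sketch (illustrative values, not part of the original file): audio is
# routed to the feature extractor and text to the tokenizer; when both are
# given, the tokenized text is attached to the returned inputs as `labels`:
#
#     import numpy as np
#     inputs = processor(audio=np.zeros(16_000), sampling_rate=16_000, text='hello world')
#     inputs['input_features'], inputs['labels']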
| 717
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'tokenizer_file': {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'gpt-neox-20b': 2_0_4_8,
}
class lowerCamelCase (a__ ):
_lowercase : Optional[int] = VOCAB_FILES_NAMES
_lowercase : str = PRETRAINED_VOCAB_FILES_MAP
_lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : Optional[int] = ["""input_ids""", """attention_mask"""]
def __init__( self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__="<|endoftext|>" , lowercase__="<|endoftext|>" , lowercase__="<|endoftext|>" , lowercase__=False , **lowercase__ , ) -> List[Any]:
"""simple docstring"""
super().__init__(
lowercase__ , lowercase__ , tokenizer_file=lowercase__ , unk_token=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , add_prefix_space=lowercase__ , **lowercase__ , )
_snake_case : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , lowercase__ ) != add_prefix_space:
_snake_case : int = getattr(lowercase__ , pre_tok_state.pop('''type''' ) )
_snake_case : int = add_prefix_space
_snake_case : Optional[Any] = pre_tok_class(**lowercase__ )
_snake_case : List[str] = add_prefix_space
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids( self , conversation ) -> List[int]:
        """simple docstring"""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
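# Usage sketch (assumes the public `GPTNeoXTokenizerFast` name for the class
# above; illustrative only):
#
#     tok = GPTNeoXTokenizerFast.from_pretrained('EleutherAI/gpt-neox-20b')
#     tok('Hello world')['input_ids']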
| 47
| 0
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename ):
    """simple docstring"""
    result = {}
    with open(filename , '''r''' ) as file:
        for line_number, line in enumerate(file ):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
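# Example (illustrative): a label file containing the lines "angry" and "happy"
# yields {0: 'angry', 1: 'happy'}, which is used below as the sequence
# classification model's id-to-label mapping.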
def set_recursively( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
for attribute in key.split('''.''' ):
_snake_case : Optional[int] = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case : Union[str, Any] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCAmelCase_ ):
_snake_case : Optional[int] = PARAM_MAPPING[full_name.split('''.''' )[-1]]
_snake_case : Dict = '''param'''
if weight_type is not None and weight_type != "param":
_snake_case : List[str] = getattr(lowerCAmelCase_ , lowerCAmelCase_ ).shape
elif weight_type is not None and weight_type == "param":
_snake_case : int = hf_pointer
for attribute in hf_param_name.split('''.''' ):
_snake_case : str = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case : Optional[int] = shape_pointer.shape
# let's reduce dimension
_snake_case : Optional[int] = value[0]
else:
_snake_case : Optional[Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
_snake_case : Any = value
elif weight_type == "weight_g":
_snake_case : Dict = value
elif weight_type == "weight_v":
_snake_case : Dict = value
elif weight_type == "bias":
_snake_case : Optional[Any] = value
elif weight_type == "param":
for attribute in hf_param_name.split('''.''' ):
_snake_case : List[Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case : Optional[Any] = value
else:
_snake_case : Tuple = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def rename_dict( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : str = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCAmelCase_ ):
_snake_case : int = PARAM_MAPPING[full_name.split('''.''' )[-1]]
_snake_case : Dict = '''param'''
if weight_type is not None and weight_type != "param":
_snake_case : str = '''.'''.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
_snake_case : int = '''.'''.join([key, hf_param_name] )
else:
_snake_case : List[str] = key
_snake_case : Optional[int] = value if '''lm_head''' in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def load_wavaveca_layer( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None ):
"""simple docstring"""
_snake_case : Optional[Any] = False
for key, mapped_key in MAPPING.items():
_snake_case : List[Any] = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
_snake_case : Dict = True
if "*" in mapped_key:
_snake_case : Any = name.split(lowerCAmelCase_ )[0].split('''.''' )[-2]
_snake_case : str = mapped_key.replace('''*''' , lowerCAmelCase_ )
if "weight_g" in name:
_snake_case : str = '''weight_g'''
elif "weight_v" in name:
_snake_case : List[Any] = '''weight_v'''
elif "bias" in name:
_snake_case : List[Any] = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_snake_case : int = '''weight'''
else:
_snake_case : Optional[int] = None
if hf_dict is not None:
rename_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
set_recursively(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return is_used
return is_used
def recursively_load_weights( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : str = []
_snake_case : Optional[int] = fairseq_model.state_dict()
_snake_case : Optional[int] = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
_snake_case : Dict = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , hf_model.config.feat_extract_norm == '''group''' , )
_snake_case : Optional[int] = True
else:
_snake_case : Optional[Any] = load_wavaveca_layer(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if not is_used:
unused_weights.append(lowerCAmelCase_ )
logger.warning(f'''Unused weights: {unused_weights}''' )
def load_conv_layer( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : List[str] = full_name.split('''conv_layers.''' )[-1]
_snake_case : Tuple = name.split('''.''' )
_snake_case : int = int(items[0] )
_snake_case : Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
_snake_case : Dict = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
_snake_case : Dict = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
_snake_case : int = value
            logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
_snake_case : List[str] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCAmelCase_ )
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True , is_seq_class=False ):
"""simple docstring"""
if config_path is not None:
_snake_case : Dict = WavaVecaConfig.from_pretrained(lowerCAmelCase_ )
else:
_snake_case : Tuple = WavaVecaConfig()
if is_seq_class:
_snake_case : List[str] = read_txt_into_dict(lowerCAmelCase_ )
_snake_case : Optional[Any] = idalabel
_snake_case : Dict = WavaVecaForSequenceClassification(lowerCAmelCase_ )
_snake_case : List[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , )
feature_extractor.save_pretrained(lowerCAmelCase_ )
elif is_finetuned:
if dict_path:
_snake_case : int = Dictionary.load(lowerCAmelCase_ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_snake_case : Optional[int] = target_dict.pad_index
_snake_case : Tuple = target_dict.bos_index
_snake_case : Dict = target_dict.eos_index
_snake_case : Dict = len(target_dict.symbols )
_snake_case : List[Any] = os.path.join(lowerCAmelCase_ , '''vocab.json''' )
if not os.path.isdir(lowerCAmelCase_ ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(lowerCAmelCase_ ) )
return
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
_snake_case : Optional[Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
_snake_case : Any = 0
_snake_case : List[str] = 1
with open(lowerCAmelCase_ , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case : str = WavaVecaCTCTokenizer(
lowerCAmelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=lowerCAmelCase_ , )
_snake_case : Any = True if config.feat_extract_norm == '''layer''' else False
_snake_case : Any = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , )
_snake_case : Any = WavaVecaProcessor(feature_extractor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ )
processor.save_pretrained(lowerCAmelCase_ )
_snake_case : List[Any] = WavaVecaForCTC(lowerCAmelCase_ )
else:
_snake_case : str = WavaVecaForPreTraining(lowerCAmelCase_ )
if is_finetuned or is_seq_class:
_snake_case : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
_snake_case : Tuple = argparse.Namespace(task='''audio_pretraining''' )
_snake_case : str = fairseq.tasks.setup_task(lowerCAmelCase_ )
_snake_case : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCAmelCase_ )
_snake_case : Any = model[0].eval()
recursively_load_weights(lowerCAmelCase_ , lowerCAmelCase_ , not is_finetuned )
hf_wavavec.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
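# Example invocation (hypothetical paths; the script name is illustrative,
# the flags match the parser defined above):
#
#     python convert_wav2vec2_checkpoint.py \
#         --checkpoint_path ./wav2vec_small.pt \
#         --pytorch_dump_folder_path ./wav2vec2-base \
#         --not_finetuned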
| 718
|
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num ):
    """simple docstring"""
    if num <= 0:
        raise ValueError('''math domain error''' )
    return quad(integrand , 0 , inf , args=(num,) )[0]


def integrand(x , z ):
    """simple docstring"""
    return math.pow(x , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 47
| 0
|
'''simple docstring'''
import math
def solution(n = 100 ):
    """simple docstring"""
    sum_of_squares = sum(i * i for i in range(1 , n + 1 ) )
    square_of_sum = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
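# Worked example for n = 10: the sum of squares is 385 and the square of the
# sum is 55**2 == 3025, so solution(10) == 3025 - 385 == 2640.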
if __name__ == "__main__":
print(F"""{solution() = }""")
| 719
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class lowerCamelCase (unittest.TestCase ):
@slow
def UpperCAmelCase_ ( self ) -> List[Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_snake_case : Union[str, Any] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Any = TFAutoModel.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : str = AutoModel.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> List[str]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_snake_case : Optional[Any] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Dict = TFAutoModelForPreTraining.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Tuple = AutoModelForPreTraining.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Optional[int] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : List[Any] = TFAutoModelForCausalLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
_snake_case , _snake_case : Tuple = TFAutoModelForCausalLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
_snake_case , _snake_case : Optional[Any] = AutoModelForCausalLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : List[Any] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Tuple = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Any:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
_snake_case , _snake_case : List[str] = TFAutoModelForMaskedLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : int = AutoModelForMaskedLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
_snake_case , _snake_case : Optional[int] = AutoModelForMaskedLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> List[str]:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
_snake_case , _snake_case : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
_snake_case , _snake_case : Dict = AutoModelForSeqaSeqLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Dict:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_snake_case : Any = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Any = TFAutoModelForSequenceClassification.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Dict = AutoModelForSequenceClassification.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_snake_case : str = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : str = TFAutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
def UpperCAmelCase_ ( self ) -> str:
"""simple docstring"""
_snake_case : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
_snake_case : Tuple = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
def UpperCAmelCase_ ( self ) -> str:
"""simple docstring"""
_snake_case : List[str] = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
_snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
| 47
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class lowerCamelCase (unittest.TestCase ):
@slow
def UpperCAmelCase_ ( self ) -> List[Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_snake_case : Union[str, Any] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Any = TFAutoModel.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : str = AutoModel.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> List[str]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_snake_case : Optional[Any] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Dict = TFAutoModelForPreTraining.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Tuple = AutoModelForPreTraining.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Optional[int] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : List[Any] = TFAutoModelForCausalLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
_snake_case : Tuple = TFAutoModelForCausalLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
_snake_case : Optional[Any] = AutoModelForCausalLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : List[Any] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Tuple = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Any:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
_snake_case : List[str] = TFAutoModelForMaskedLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : int = AutoModelForMaskedLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
_snake_case : Optional[int] = AutoModelForMaskedLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> List[str]:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_pt=lowercase__ )
_snake_case : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_tf=lowercase__ )
_snake_case : Dict = AutoModelForSeqaSeqLM.from_pretrained(
lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Dict:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_snake_case : Any = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Any = TFAutoModelForSequenceClassification.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Dict = AutoModelForSequenceClassification.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def UpperCAmelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_snake_case : str = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : str = TFAutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
_snake_case : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
def UpperCAmelCase_ ( self ) -> str:
"""simple docstring"""
_snake_case : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
_snake_case : Tuple = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
def UpperCAmelCase_ ( self ) -> str:
"""simple docstring"""
_snake_case : List[str] = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
_snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
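# A standalone sketch of the PT<->TF round trip every test above exercises
# (an illustrative addition, not part of the original test suite; it assumes
# the "bert-base-uncased" checkpoint can be downloaded):
if __name__ == "__main__":
    if is_tf_available() and is_torch_available():
        tf_model = TFAutoModel.from_pretrained('''bert-base-uncased''' , from_pt=True )
        tf_model.save_pretrained('''./local-bert-tf''' )
        pt_model = AutoModel.from_pretrained('''./local-bert-tf''' , from_tf=True )
        print(type(tf_model ).__name__ , type(pt_model ).__name__ )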
| 720
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_timm_backbone': ['TimmBackboneConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_timm_backbone'] = ['TimmBackbone']
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
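# A self-contained illustration of the laziness used above (an addition for
# context, not part of the original file): importlib's documented LazyLoader
# recipe defers a module body until its first attribute access, which is the
# same effect `_LazyModule` gives this package.
def _lazy_import_sketch(name):
    import importlib.util
    spec = importlib.util.find_spec(name)
    spec.loader = importlib.util.LazyLoader(spec.loader)
    module = importlib.util.module_from_spec(spec)
    sys.modules[name] = module
    spec.loader.exec_module(module)  # the real import runs on first attribute access
    return module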
| 47
| 0
|
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all digits before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase (datasets.Metric ):
def UpperCAmelCase_ ( self ) -> List[str]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , reference_urls=[] , )
    def UpperCAmelCase_ ( self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False , ) -> dict:
        """simple docstring"""
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s , '''''' , x ) for x in predictions] )
                references = np.array([re.sub(s , '''''' , x ) for x in references] )
        else:
            predictions = np.asarray(predictions )
            references = np.asarray(references )
        if ignore_case:
            predictions = np.char.lower(predictions )
            references = np.char.lower(references )
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans('''''' , '''''' , string.punctuation )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        if ignore_numbers:
            repl_table = string.digits.maketrans('''''' , '''''' , string.digits )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        score_list = predictions == references
        return {"exact_match": np.mean(score_list ) * 100}
| 721
|
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')
MODEL_MODES = {
'base': AutoModel,
'sequence-classification': AutoModelForSequenceClassification,
'question-answering': AutoModelForQuestionAnswering,
'pretraining': AutoModelForPreTraining,
'token-classification': AutoModelForTokenClassification,
'language-modeling': AutoModelWithLMHead,
'summarization': AutoModelForSeqaSeqLM,
'translation': AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = '{' + ', '.join(arg_to_scheduler_choices) + '}'
class lowerCamelCase (pl.LightningModule ):
    def __init__( self , hparams , num_labels=None , mode="base" , config=None , tokenizer=None , model=None , **config_kwargs , ) -> None:
        """simple docstring"""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams )
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir )
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=cache_dir , **config_kwargs , )
        else:
            self.config: PretrainedConfig = config
        extra_model_params = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
        for p in extra_model_params:
            if getattr(self.hparams , p , None ):
                assert hasattr(self.config , p ), F'''model config doesn\'t have a `{p}` attribute'''
                setattr(self.config , p , getattr(self.hparams , p ) )
        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=cache_dir , )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=cache_dir , )
        else:
            self.model = model
    def UpperCAmelCase_ ( self , *args , **kwargs ) -> List[str]:
        """simple docstring"""
        self.model = self.model_type.from_pretrained(*args , **kwargs )
def UpperCAmelCase_ ( self ) -> List[Any]:
"""simple docstring"""
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
        scheduler = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1}
        return scheduler
def UpperCAmelCase_ ( self ) -> int:
"""simple docstring"""
        model = self.model
        no_decay = ['''bias''', '''LayerNorm.weight''']
        optimizer_grouped_parameters = [
            {
                '''params''': [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ],  # check these named parameters
                '''weight_decay''': self.hparams.weight_decay,
            },
            {
                '''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
                '''weight_decay''': 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters , lr=self.hparams.learning_rate , scale_parameter=False , relative_step=False )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]
    def UpperCAmelCase_ ( self , batch , batch_nb ) -> Any:
        """simple docstring"""
        return self.validation_step(batch , batch_nb )
    def UpperCAmelCase_ ( self , outputs ) -> Tuple:
        """simple docstring"""
        return self.validation_end(outputs )
def UpperCAmelCase_ ( self ) -> int:
"""simple docstring"""
        num_devices = max(1 , self.hparams.gpus )  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
    def UpperCAmelCase_ ( self , stage ) -> Any:
        """simple docstring"""
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset )
        else:
            self.train_loader = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=True )
            self.dataset_size = len(self.train_dataloader().dataset )
    def UpperCAmelCase_ ( self , type_path , batch_size , shuffle = False ) -> str:
        """simple docstring"""
        raise NotImplementedError('''You must implement this for your task''' )
def UpperCAmelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
return self.train_loader
def UpperCAmelCase_ ( self ) -> Dict:
"""simple docstring"""
        return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=False )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
        return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=False )
    def UpperCAmelCase_ ( self , mode ) -> Optional[int]:
        """simple docstring"""
        return os.path.join(
            self.hparams.data_dir , '''cached_{}_{}_{}'''.format(
                mode , list(filter(None , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
    def UpperCAmelCase_ ( self , checkpoint ) -> None:
        """simple docstring"""
        save_path = self.output_dir.joinpath('''best_tfmr''' )
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path )
        self.tokenizer.save_pretrained(save_path )
@staticmethod
    def UpperCAmelCase_ ( parser , root_dir ) -> Tuple:
        """simple docstring"""
        parser.add_argument(
            '''--model_name_or_path''' , default=None , type=str , required=True , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
        parser.add_argument(
            '''--config_name''' , default='''''' , type=str , help='''Pretrained config name or path if not the same as model_name''' )
        parser.add_argument(
            '''--tokenizer_name''' , default=None , type=str , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
        parser.add_argument(
            '''--cache_dir''' , default=str(Path(__file__ ).parent / '''test_run''' / '''cache''' ) , type=str , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , )
        parser.add_argument(
            '''--encoder_layerdrop''' , type=float , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , )
        parser.add_argument(
            '''--decoder_layerdrop''' , type=float , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , )
        parser.add_argument(
            '''--dropout''' , type=float , help='''Dropout probability (Optional). Goes into model.config''' , )
        parser.add_argument(
            '''--attention_dropout''' , type=float , help='''Attention dropout probability (Optional). Goes into model.config''' , )
        parser.add_argument('''--learning_rate''' , default=5E-5 , type=float , help='''The initial learning rate for Adam.''' )
        parser.add_argument(
            '''--lr_scheduler''' , default='''linear''' , choices=arg_to_scheduler_choices , metavar=arg_to_scheduler_metavar , type=str , help='''Learning rate scheduler''' , )
        parser.add_argument('''--weight_decay''' , default=0.0 , type=float , help='''Weight decay if we apply some.''' )
        parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=float , help='''Epsilon for Adam optimizer.''' )
        parser.add_argument('''--warmup_steps''' , default=0 , type=int , help='''Linear warmup over warmup_steps.''' )
        parser.add_argument('''--num_workers''' , default=4 , type=int , help='''kwarg passed to DataLoader''' )
        parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=int )
        parser.add_argument('''--train_batch_size''' , default=32 , type=int )
        parser.add_argument('''--eval_batch_size''' , default=32 , type=int )
        parser.add_argument('''--adafactor''' , action='''store_true''' )
class InitCallback (pl.Callback ):
    def UpperCAmelCase_ ( self , trainer , pl_module ) -> str:
        """simple docstring"""
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on the master worker with RAY; in newer pytorch-lightning versions, accelerators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.
class CheckParamCallback (pl.Callback ):
    def UpperCAmelCase_ ( self , trainer , pl_module ) -> List[str]:
        """simple docstring"""
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name )
class LoggingCallback (pl.Callback ):
    def UpperCAmelCase_ ( self , trainer , pl_module ) -> Any:
        """simple docstring"""
        lr_scheduler = trainer.lr_schedulers[0]['''scheduler''']
        lrs = {F'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
        pl_module.logger.log_metrics(lrs )
    def UpperCAmelCase_ ( self , trainer , pl_module ) -> List[str]:
        """simple docstring"""
        rank_zero_info('''***** Validation results *****''' )
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics ):
            if key not in ["log", "progress_bar"]:
                rank_zero_info('''{} = {}\n'''.format(key , str(metrics[key] ) ) )
    def UpperCAmelCase_ ( self , trainer , pl_module ) -> Dict:
        """simple docstring"""
        rank_zero_info('''***** Test results *****''' )
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' )
        with open(output_test_results_file , '''w''' ) as writer:
            for key in sorted(metrics ):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info('''{} = {}\n'''.format(key , str(metrics[key] ) ) )
                    writer.write('''{} = {}\n'''.format(key , str(metrics[key] ) ) )
def add_generic_args ( parser , root_dir ):
    """simple docstring"""
    parser.add_argument(
        '''--output_dir''' , default=str(Path(__file__ ).parent / '''test_run''' / '''model_checkpoints''' ) , type=str , help='''The output directory where the model predictions and checkpoints will be written.''' , )
    parser.add_argument(
        '''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
    parser.add_argument(
        '''--fp16_opt_level''' , type=str , default='''O2''' , help=(
            '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
            '''See details at https://nvidia.github.io/apex/amp.html'''
        ) , )
    parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=int )
    parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=float , help='''Max gradient norm''' )
    parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
    parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' )
    parser.add_argument(
        '''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=int , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
    parser.add_argument('''--seed''' , type=int , default=42 , help='''random seed for initialization''' )
    parser.add_argument(
        '''--data_dir''' , default=str(Path(__file__ ).parent / '''test_run''' / '''dummy-train-data''' ) , type=str , help='''The input data dir. Should contain the training files for the CoNLL-2003 NER task.''' , )
def generic_train ( model , args , early_stopping_callback=None , logger=True , extra_callbacks=[] , checkpoint_callback=None , logging_callback=None , **extra_train_kwargs , ):
    """simple docstring"""
    pl.seed_everything(args.seed )
    # init model
    odir = Path(model.hparams.output_dir )
    odir.mkdir(exist_ok=True )
    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback )
    if logging_callback is None:
        logging_callback = LoggingCallback()
    train_params = {}
    if args.fp16:
        train_params['''precision'''] = 16
    if args.gpus > 1:
        train_params['''accelerator'''] = '''auto'''
        train_params['''strategy'''] = '''ddp'''
    train_params['''accumulate_grad_batches'''] = args.accumulate_grad_batches
    train_params['''profiler'''] = None
    train_params['''devices'''] = '''auto'''
    trainer = pl.Trainer.from_argparse_args(
        args , weights_summary=None , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=logger , val_check_interval=1 , num_sanity_val_steps=2 , **train_params , )
    if args.do_train:
        trainer.fit(model )
    else:
        print('''RAG modeling tests with new set functions successfully executed!''' )
    return trainer
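# A minimal wiring sketch for the helpers above (an illustrative addition,
# not part of the original module): this is how the training scripts that
# import this file assemble their argument parser.
if __name__ == "__main__":
    main_parser = argparse.ArgumentParser()
    add_generic_args(main_parser , os.getcwd() )
    lowerCamelCase.UpperCAmelCase_(main_parser , os.getcwd() )  # the add_model_specific_args staticmethod
    print(main_parser.parse_args(['''--model_name_or_path''', '''bert-base-cased''', '''--do_train'''] ) )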
| 47
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet'] = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet_fast'] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlnet'] = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlnet'] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 700
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : Dict = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class lowerCamelCase (PretrainedConfig ):
_lowercase : List[str] = """sew-d"""
    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , squeeze_factor=2 , max_position_embeddings=512 , position_buckets=256 , share_att_key=True , relative_attention=True , pos_att_type=("p2c", "c2p") , norm_rel_ebd="layer_norm" , hidden_act="gelu_python" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , initializer_range=0.02 , layer_norm_eps=1E-7 , feature_layer_norm_eps=1E-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type )
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
@property
def UpperCAmelCase_ ( self ) -> Any:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
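# A small sanity check of the property above (an illustrative addition, not
# part of the original configuration module): with the default strides
# (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) the feature extractor downsamples
# the raw waveform by 5 * 2**4 = 80 overall.
if __name__ == "__main__":
    print(lowerCamelCase().UpperCAmelCase_ )  # expected: 80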
| 47
| 0
|
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
logger = logging.getLogger(__name__)
def save_model ( model , dirpath ):
    """simple docstring"""
    if os.path.exists(dirpath ):
        if os.path.exists(os.path.join(dirpath , '''config.json''' ) ) and os.path.isfile(
            os.path.join(dirpath , '''config.json''' ) ):
            os.remove(os.path.join(dirpath , '''config.json''' ) )
        if os.path.exists(os.path.join(dirpath , '''pytorch_model.bin''' ) ) and os.path.isfile(
            os.path.join(dirpath , '''pytorch_model.bin''' ) ):
            os.remove(os.path.join(dirpath , '''pytorch_model.bin''' ) )
    else:
        os.makedirs(dirpath )
    model.save_pretrained(dirpath )
def entropy ( p , unlogit=False ):
    """simple docstring"""
    exponent = 2
    if unlogit:
        p = torch.pow(p , exponent )
    plogp = p * torch.log(p )
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1 )
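# A quick self-check of `entropy` above (an illustrative addition, not part
# of the original script): a uniform distribution over four outcomes has
# entropy log(4).
def _entropy_sanity_check():
    uniform = torch.full((4,) , 0.25 )
    assert torch.isclose(entropy(uniform ) , torch.log(torch.tensor(4.0 ) ) )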
def print_ad_tensor ( tensor ):
    """simple docstring"""
    logger.info('''lv, h >\t''' + '''\t'''.join(f'''{x + 1}''' for x in range(len(tensor ) ) ) )
    for row in range(len(tensor ) ):
        if tensor.dtype != torch.long:
            logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) )
        else:
            logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:d}''' for x in tensor[row].cpu().data ) )
def compute_heads_importance ( args , model , eval_dataloader , compute_entropy=True , compute_importance=True , head_mask=None , actually_pruned=False ):
    """simple docstring"""
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    attn_entropy = torch.zeros(n_layers , n_heads ).to(args.device )
    head_importance = torch.zeros(n_layers , n_heads ).to(args.device )
    if head_mask is None:
        head_mask = torch.ones(n_layers , n_heads ).to(args.device )
    head_mask.requires_grad_(requires_grad=True )
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None
    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ):
        inputs = tuple(t.to(args.device ) for t in inputs )
        (input_ids,) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids , labels=input_ids , head_mask=head_mask )
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions ):
                masked_entropy = entropy(attn.detach() , True )
                attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids ).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance , exponent ).sum(-1 ) , 1 / exponent )
        head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info('''Attention entropies''' )
        print_ad_tensor(attn_entropy )
    if compute_importance:
        logger.info('''Head importance scores''' )
        print_ad_tensor(head_importance )
    logger.info('''Head ranked by importance scores''' )
    head_ranks = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
    head_ranks[head_importance.view(-1 ).sort(descending=True )[1]] = torch.arange(
        head_importance.numel() , device=args.device )
    head_ranks = head_ranks.view_as(head_importance )
    print_ad_tensor(head_ranks )
    return attn_entropy, head_importance, total_loss
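# The importance signal above in isolation (an illustrative addition, not
# part of the original script): the head mask multiplies into the attention
# weights, so the gradient of the loss w.r.t. each mask entry scores how much
# that head matters.
def _head_importance_toy():
    head_mask = torch.ones(2 , 2 , requires_grad=True )
    loss = (head_mask * torch.tensor([[1.0, 2.0], [3.0, 4.0]] )).sum()
    loss.backward()
    return head_mask.grad.abs()  # tensor([[1., 2.], [3., 4.]])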
def mask_heads ( args , model , eval_dataloader ):
    """simple docstring"""
    _, head_importance, loss = compute_heads_importance(args , model , eval_dataloader , compute_entropy=False )
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info('''Pruning: original score: %f, threshold: %f''' , original_score , original_score * args.masking_threshold )
    new_head_mask = torch.ones_like(head_importance )
    num_to_mask = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float('''Inf''' )
        current_heads_to_mask = head_importance.view(-1 ).sort()[1]
        if len(current_heads_to_mask ) <= num_to_mask:
            print('''BREAK BY num_to_mask''' )
            break
        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) )
        new_head_mask = new_head_mask.view(-1 )
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask )
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask )
        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args , model , eval_dataloader , compute_entropy=False , head_mask=new_head_mask )
        current_score = 1 / loss
        logger.info(
            '''Masking: current score: %f, remaining heads %d (%.1f percent)''' , current_score , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
    logger.info('''Final head mask''' )
    print_ad_tensor(head_mask )
    np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() )
    return head_mask
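# The selection step inside the loop above, in isolation (an illustrative
# addition, not part of the original script): zero out the k least-important
# entries of a score matrix through a flat view.
def _mask_k_smallest(scores , k ):
    mask = torch.ones_like(scores ).view(-1 )
    mask[scores.view(-1 ).sort()[1][:k]] = 0.0
    return mask.view_as(scores )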
def prune_heads ( args , model , eval_dataloader , head_mask ):
    """simple docstring"""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=head_mask )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters() )
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask ) )
    }
    for k, v in heads_to_prune.items():
        if isinstance(v , int ):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune )
    pruned_num_params = sum(p.numel() for p in model.parameters() )
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=None , actually_pruned=True , )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time
    logger.info(
        '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percent)''' , original_num_params , pruned_num_params , pruned_num_params / original_num_params * 100 , )
    logger.info('''Pruning: score with masking: %f score with pruning: %f''' , score_masking , score_pruning )
    logger.info('''Pruning: speed ratio (original timing / new timing): %f percent''' , original_time / new_time * 100 )
    save_model(model , args.output_dir )
def main ():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--data_dir''' , default=None , type=str , required=True , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
    parser.add_argument(
        '''--model_name_or_path''' , default=None , type=str , required=True , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
    parser.add_argument(
        '''--output_dir''' , default=None , type=str , required=True , help='''The output directory where the model predictions and checkpoints will be written.''' , )
    # Other parameters
    parser.add_argument(
        '''--config_name''' , default='''''' , type=str , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
    parser.add_argument(
        '''--tokenizer_name''' , default='''''' , type=str , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
    parser.add_argument(
        '''--cache_dir''' , default=None , type=str , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
    parser.add_argument(
        '''--data_subset''' , type=int , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' )
    parser.add_argument(
        '''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' )
    parser.add_argument(
        '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
    parser.add_argument(
        '''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' )
    parser.add_argument(
        '''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
    parser.add_argument(
        '''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' )
    parser.add_argument(
        '''--masking_threshold''' , default=0.9 , type=float , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , )
    parser.add_argument(
        '''--masking_amount''' , default=0.1 , type=float , help='''Amount to heads to masking at each masking step.''' )
    parser.add_argument('''--metric_name''' , default='''acc''' , type=str , help='''Metric to use for head masking.''' )
    parser.add_argument(
        '''--max_seq_length''' , default=128 , type=int , help=(
            '''The maximum total input sequence length after WordPiece tokenization. \n'''
            '''Sequences longer than this will be truncated, sequences shorter padded.'''
        ) , )
    parser.add_argument('''--batch_size''' , default=1 , type=int , help='''Batch size.''' )
    parser.add_argument('''--seed''' , type=int , default=42 )
    parser.add_argument('''--local_rank''' , type=int , default=-1 , help='''local_rank for distributed training on gpus''' )
    parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' )
    parser.add_argument('''--server_ip''' , type=str , default='''''' , help='''Can be used for distant debugging.''' )
    parser.add_argument('''--server_port''' , type=str , default='''''' , help='''Can be used for distant debugging.''' )
    args = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print('''Waiting for debugger attach''' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=True )
        ptvsd.wait_for_attach()
    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank )
        args.device = torch.device('''cuda''' , args.local_rank )
        args.n_gpu = 1
        torch.distributed.init_process_group(backend='''nccl''' )  # Initializes the distributed backend
    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
    logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
    # Distributed and parallel training
    model.to(args.device )
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=True )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model )
    # Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=True )
    torch.save(args , os.path.join(args.output_dir , '''run_args.bin''' ) )
    logger.info('''Training/evaluation parameters %s''' , args )
    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir , dtype=np.int64 ),
        ] )
    train_tensor_dataset = (torch.from_numpy(numpy_data ),)
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    eval_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.batch_size )
    # Compute head entropy and importance score
    compute_heads_importance(args , model , eval_dataloader )
    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args , model , eval_dataloader )
        prune_heads(args , model , eval_dataloader , head_mask )
if __name__ == "__main__":
main()
| 701
|
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort ( a , start , end ):
    """simple docstring"""
    count = 0
    if start < end:
        pivot = randint(start , end )
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a , start , end )
        count += _in_place_quick_sort(a , start , p - 1 )
        count += _in_place_quick_sort(a , p + 1 , end )
    return count
def _in_place_partition ( a , start , end ):
    """simple docstring"""
    count = 0
    pivot = randint(start , end )
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start , end ):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)
outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    'No of Comparisons for 100 elements selected from a standard normal distribution'
    ' is :'
)
print(z)
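# A smaller deterministic check of the sorter above (an illustrative
# addition, not part of the original demo):
demo = np.array([3.0, 1.0, 2.0])
_in_place_quick_sort(demo, 0, len(demo) - 1)
print('Sorted demo array:', demo)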
| 47
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowerCamelCase (unittest.TestCase ):
def UpperCAmelCase_ ( self ) -> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image ( self ) -> Optional[Any]:
        """simple docstring"""
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
        return image
@property
def UpperCAmelCase_ ( self ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
_snake_case : List[str] = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=lowercase__ , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def UpperCAmelCase_ ( self ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
_snake_case : Any = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def UpperCAmelCase_ ( self ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
_snake_case : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=512 , )
return CLIPTextModel(lowercase__ )
    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt],
            image=2 * [low_res_image],
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            num_images_per_prompt=2,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)

@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            num_inference_steps=5,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
| 702
|
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
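
# Minimal usage sketch (an illustrative comment, not part of the original module;
# assumes the public "microsoft/vq-diffusion-ithq" checkpoint is available):
#
#   from diffusers import VQDiffusionPipeline
#
#   pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#   image = pipe("teddy bear playing in the pool").images[0]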
| 47
| 0
|