| column | type | range |
|---|---|---|
| code | string | lengths 86–54.5k |
| code_codestyle | int64 | 0–371 |
| style_context | string | lengths 87–49.2k |
| style_context_codestyle | int64 | 0–349 |
| label | int64 | 0–1 |
from __future__ import annotations

from random import random
from typing import Generic, TypeVar

KT = TypeVar("KT")
VT = TypeVar("VT")


class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        # References to the next node on each level, from lowest to highest.
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """Number of forward references of this node."""
        return len(self.forward)


class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        """ASCII-art representation of the list, one column per forward level."""
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]

            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        node = self.head

        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Return a random level in [1, self.max_level]; higher levels are less likely."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        """
        Return (node, update_vector): the node holding `key` (or None), and the
        rightmost node with a key lower than `key` on each level.
        """
        # Nodes with keys lower than key, collected from top level to bottom.
        update_vector = []

        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i`, decrement `i`.
            # node.forward[i].key < key - Jumping to a node with a key higher than
            #                             or equal to the searched key would skip it.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to the searched node) will potentially
            # have to be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If the current node doesn't contain any further
        #                          references, the searched key is not present.
        # node.forward[0].key == key - The next node's key equals the searched key
        #                              if the key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to the removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After a level increase we have to add additional references to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through the new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: KT) -> VT | None:
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None


def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests():
    for _ in range(100):
        # Repeat the tests 100 times due to the probabilistic nature of skip lists:
        # random values == random bugs.
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()


def main():
    """
    >>> pytests()
    """
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
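
# Usage sketch (added for illustration; exercises the SkipList API defined above):
sl = SkipList()
sl.insert("alpha", 1)
sl.insert("beta", 2)
assert sl.find("alpha") == 1       # find returns the stored value
sl.delete("alpha")
assert sl.find("alpha") is None    # deleted keys are no longer found
print(list(sl))                    # keys iterate in sorted order: ['beta']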
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
lowerCamelCase_ : Dict = get_logger(__name__)
lowerCamelCase_ : List[str] = r'\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n'
class _UpperCAmelCase :
'''simple docstring'''
@add_start_docstrings(snake_case_ )
def __call__( self , snake_case_ , snake_case_ ):
"""simple docstring"""
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class _UpperCAmelCase :
'''simple docstring'''
@add_start_docstrings(snake_case_ )
def __call__( self , snake_case_ , snake_case_ ):
"""simple docstring"""
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
@add_start_docstrings(snake_case_ )
def __call__( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ):
"""simple docstring"""
for processor in self:
A_ : Tuple = inspect.signature(processor.__call__ ).parameters
if len(snake_case_ ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
F"""Make sure that all the required parameters: {list(function_args.keys() )} for """
F"""{processor.__class__} are passed to the logits processor.""" )
A_ : Tuple = processor(snake_case_ , snake_case_ , snake_case_ , **snake_case_ )
else:
A_ : Optional[Any] = processor(snake_case_ , snake_case_ , snake_case_ )
return scores
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , snake_case_ ):
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or not (temperature > 0):
raise ValueError(F"""`temperature` has to be a strictly positive float, but is {temperature}""" )
A_ : Optional[int] = temperature
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : int = scores / self.temperature
return scores
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_ = -float('Inf' ) , snake_case_ = 1 ):
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or (top_p < 0 or top_p > 1.0):
raise ValueError(F"""`top_p` has to be a float > 0 and < 1, but is {top_p}""" )
if not isinstance(snake_case_ , snake_case_ ) or (min_tokens_to_keep < 1):
raise ValueError(F"""`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}""" )
A_ : str = top_p
A_ : Union[str, Any] = filter_value
A_ : int = min_tokens_to_keep
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ , A_ : Tuple = lax.top_k(snake_case_ , scores.shape[-1] )
A_ : List[Any] = jnp.full_like(snake_case_ , self.filter_value )
A_ : List[str] = jax.nn.softmax(snake_case_ , axis=-1 ).cumsum(axis=-1 )
A_ : Optional[int] = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
A_ : Union[str, Any] = jnp.roll(snake_case_ , 1 )
score_mask |= score_mask.at[:, 0].set(snake_case_ )
# min tokens to keep
A_ : int = score_mask.at[:, : self.min_tokens_to_keep].set(snake_case_ )
A_ : Optional[Any] = jnp.where(snake_case_ , snake_case_ , snake_case_ )
A_ : List[Any] = jax.lax.sort_key_val(snake_case_ , snake_case_ )[-1]
return next_scores
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_ = -float('Inf' ) , snake_case_ = 1 ):
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or top_k <= 0:
raise ValueError(F"""`top_k` has to be a strictly positive integer, but is {top_k}""" )
A_ : str = max(snake_case_ , snake_case_ )
A_ : Union[str, Any] = filter_value
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ , A_ : int = scores.shape
A_ : Tuple = jnp.full(batch_size * vocab_size , self.filter_value )
A_ : Union[str, Any] = min(self.top_k , scores.shape[-1] ) # Safety check
A_ , A_ : Dict = lax.top_k(snake_case_ , snake_case_ )
A_ : Optional[int] = jnp.broadcast_to((jnp.arange(snake_case_ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
A_ : int = topk_scores.flatten()
A_ : Any = topk_indices.flatten() + shift
A_ : List[str] = next_scores_flat.at[topk_indices_flat].set(snake_case_ )
A_ : Union[str, Any] = next_scores_flat.reshape(snake_case_ , snake_case_ )
return next_scores
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , snake_case_ ):
"""simple docstring"""
A_ : Union[str, Any] = bos_token_id
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : Optional[Any] = jnp.full(scores.shape , -float('inf' ) )
A_ : Union[str, Any] = 1 - jnp.bool_(cur_len - 1 )
A_ : str = jnp.where(snake_case_ , new_scores.at[:, self.bos_token_id].set(0 ) , snake_case_ )
return scores
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : Dict = max_length
A_ : Optional[int] = eos_token_id
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : Union[str, Any] = jnp.full(scores.shape , -float('inf' ) )
A_ : Dict = 1 - jnp.bool_(cur_len - self.max_length + 1 )
A_ : Dict = jnp.where(snake_case_ , new_scores.at[:, self.eos_token_id].set(0 ) , snake_case_ )
return scores
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_ ):
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or min_length < 0:
raise ValueError(F"""`min_length` has to be a positive integer, but is {min_length}""" )
if not isinstance(snake_case_ , snake_case_ ) or eos_token_id < 0:
raise ValueError(F"""`eos_token_id` has to be a positive integer, but is {eos_token_id}""" )
A_ : Any = min_length
A_ : List[Any] = eos_token_id
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : int = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
A_ : Optional[Any] = jnp.where(snake_case_ , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , snake_case_ )
return scores
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : List[Any] = list(snake_case_ )
A_ : Tuple = begin_index
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : Dict = 1 - jnp.bool_(cur_len - self.begin_index )
A_ : int = jnp.where(snake_case_ , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , snake_case_ )
return scores
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , snake_case_ ):
"""simple docstring"""
A_ : List[Any] = list(snake_case_ )
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : Optional[Any] = scores.at[..., self.suppress_tokens].set(-float('inf' ) )
return scores
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , snake_case_ ):
"""simple docstring"""
A_ : Any = dict(snake_case_ )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
A_ : Tuple = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
A_ : Tuple = force_token_array.at[index].set(snake_case_ )
A_ : Any = jnp.intaa(snake_case_ )
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
def _force_token(snake_case_ ):
A_ : List[Any] = scores.shape[0]
A_ : Any = self.force_token_array[generation_idx]
A_ : Tuple = jnp.ones_like(snake_case_ , dtype=scores.dtype ) * -float('inf' )
A_ : List[Any] = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
A_ : int = lax.dynamic_update_slice(snake_case_ , snake_case_ , (0, current_token) )
return new_scores
A_ : int = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(snake_case_ ) , lambda: scores , ) , )
return scores
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : Tuple = generate_config.eos_token_id
A_ : Optional[int] = generate_config.no_timestamps_token_id
A_ : List[str] = generate_config.no_timestamps_token_id + 1
A_ : Any = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(snake_case_ , 'max_initial_timestamp_index' ):
A_ : List[Any] = generate_config.max_initial_timestamp_index
else:
A_ : Any = model_config.vocab_size
if self.max_initial_timestamp_index is None:
A_ : Optional[Any] = model_config.vocab_size
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : List[str] = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) )
def handle_pairs(snake_case_ , snake_case_ ):
A_ : Any = jnp.where((cur_len - self.begin_index) >= 1 , snake_case_ , snake_case_ )
A_ : Tuple = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , snake_case_ , )
A_ : Tuple = jnp.where((cur_len - self.begin_index) < 2 , snake_case_ , snake_case_ )
A_ : Any = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , snake_case_ , snake_case_ , )
return jnp.where(
snake_case_ , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) , scores_k.at[: self.eos_token_id].set(-float('inf' ) ) , ) , snake_case_ , )
A_ : Tuple = jax.vmap(snake_case_ )(snake_case_ , snake_case_ )
A_ : Optional[Any] = jnp.where(cur_len == self.begin_index , snake_case_ , snake_case_ )
A_ : Tuple = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , snake_case_ , )
A_ : int = self.timestamp_begin + self.max_initial_timestamp_index
A_ : List[Any] = jnp.where(
snake_case_ , scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) , snake_case_ , )
# if sum of probability over timestamps is above any other token, sample timestamp
A_ : Any = jax.nn.log_softmax(snake_case_ , axis=-1 )
def handle_cumulative_probs(snake_case_ , snake_case_ ):
A_ : Dict = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
A_ : Optional[Any] = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) , snake_case_ , )
A_ : Union[str, Any] = jax.vmap(snake_case_ )(snake_case_ , snake_case_ )
return scores
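
# Illustrative composition sketch (added; class names follow the reconstruction
# above, and the dummy shapes are arbitrary):
processors = FlaxLogitsProcessorList(
    [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=50)]
)
dummy_ids = jnp.zeros((1, 4), dtype=jnp.int32)   # stand-in prompt
dummy_scores = jnp.zeros((1, 32_000))            # stand-in vocab logits
dummy_scores = processors(dummy_ids, dummy_scores, cur_len=4)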
from __future__ import annotations

import pandas as pd


def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    """Calculate the waiting time of each process (shortest remaining time first)."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    """Turnaround time is burst time plus waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(
    waiting_time: list[int], turn_around_time: list[int], no_of_processes: int
) -> None:
    """Print the average waiting and turnaround times."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)


if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
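
# Non-interactive driver sketch (added; sample data stands in for the input() prompts):
arrival = [0, 1, 2]
burst = [4, 2, 1]
n = len(burst)
wt = calculate_waitingtime(arrival, burst, n)
tat = calculate_turnaroundtime(burst, n, wt)
calculate_average_times(wt, tat, n)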
import os
import shutil
from pathlib import Path
from typing import Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging


if is_onnx_available():
    import onnxruntime as ort


logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}


class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Load an ONNX inference session, defaulting to the CPU execution provider."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"

        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str]] = None,
        revision: Optional[str] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
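
# Illustrative usage sketch (added; "model.onnx" is a hypothetical local file and
# the input name depends on the exported graph):
sess = OnnxRuntimeModel.load_model("model.onnx", provider="CPUExecutionProvider")
wrapper = OnnxRuntimeModel(model=sess, model_save_dir=Path("."), latest_model_name="model.onnx")
outputs = wrapper(input=np.zeros((1, 4), dtype=np.float32))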
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def UpperCAmelCase__ (snake_case__ : Optional[Any] ):
"""simple docstring"""
_snake_case : Tuple = filter(lambda snake_case__ : p.requires_grad , model.parameters() )
_snake_case : str = sum([np.prod(p.size() ) for p in model_parameters] )
return params
A_ = logging.getLogger(__name__)
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Dict ):
"""simple docstring"""
if metric == "rouge2":
_snake_case : Any = """{val_avg_rouge2:.4f}-{step_count}"""
elif metric == "bleu":
_snake_case : Tuple = """{val_avg_bleu:.4f}-{step_count}"""
elif metric == "em":
_snake_case : str = """{val_avg_em:.4f}-{step_count}"""
else:
raise NotImplementedError(
F"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
""" function.""" )
_snake_case : str = ModelCheckpoint(
dirpath=snake_case__ , filename=snake_case__ , monitor=F"val_{metric}" , mode="""max""" , save_top_k=3 , every_n_epochs=1 , )
return checkpoint_callback
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
return EarlyStopping(
monitor=F"val_{metric}" , mode="""min""" if """loss""" in metric else """max""" , patience=snake_case__ , verbose=snake_case__ , )
class lowercase( pl.Callback ):
'''simple docstring'''
def UpperCamelCase_ ( self: Optional[int], a_: Optional[int], a_: int ):
'''simple docstring'''
_snake_case : Union[str, Any] = {f"lr_group_{i}": param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(a_ )
@rank_zero_only
def UpperCamelCase_ ( self: int, a_: pl.Trainer, a_: pl.LightningModule, a_: str, a_: Tuple=True ):
'''simple docstring'''
logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****" )
_snake_case : int = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} )
# Log results
_snake_case : int = Path(pl_module.hparams.output_dir )
if type_path == "test":
_snake_case : Tuple = od / """test_results.txt"""
_snake_case : str = od / """test_generations.txt"""
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_snake_case : Optional[int] = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
_snake_case : Optional[Any] = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
results_file.parent.mkdir(exist_ok=a_ )
generations_file.parent.mkdir(exist_ok=a_ )
with open(a_, """a+""" ) as writer:
for key in sorted(a_ ):
if key in ["log", "progress_bar", "preds"]:
continue
_snake_case : str = metrics[key]
if isinstance(a_, torch.Tensor ):
_snake_case : int = val.item()
_snake_case : int = f"{key}: {val:.6f}\n"
writer.write(a_ )
if not save_generations:
return
if "preds" in metrics:
_snake_case : str = """\n""".join(metrics["""preds"""] )
generations_file.open("""w+""" ).write(a_ )
@rank_zero_only
def UpperCamelCase_ ( self: Tuple, a_: Optional[Any], a_: Any ):
'''simple docstring'''
try:
_snake_case : Dict = pl_module.model.model.num_parameters()
except AttributeError:
_snake_case : Tuple = pl_module.model.num_parameters()
_snake_case : int = count_trainable_parameters(a_ )
# mp stands for million parameters
trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1E6, """grad_mp""": n_trainable_pars / 1E6} )
@rank_zero_only
def UpperCamelCase_ ( self: List[str], a_: pl.Trainer, a_: pl.LightningModule ):
'''simple docstring'''
save_json(pl_module.metrics, pl_module.metrics_save_path )
return self._write_logs(a_, a_, """test""" )
@rank_zero_only
def UpperCamelCase_ ( self: Tuple, a_: pl.Trainer, a_: Any ):
'''simple docstring'''
save_json(pl_module.metrics, pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
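
# Illustrative wiring (added; assumes a seq2seq LightningModule that logs val_bleu):
callbacks = [
    Seq2SeqLoggingCallback(),
    get_checkpoint_callback("outputs", "bleu"),
    get_early_stopping_callback("bleu", patience=3),
]
trainer = pl.Trainer(callbacks=callbacks)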
ENERGY_CONVERSION: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.05585,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert `value` between the energy units listed in ENERGY_CONVERSION."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
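
# Worked examples (added): 1 kWh is 3.6 MJ, and 500 J is 0.5 kJ.
assert energy_conversion("kilowatthour", "joule", 1.0) == 3_600_000.0
assert energy_conversion("joule", "kilojoule", 500) == 0.5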
"""simple docstring"""
def UpperCAmelCase ( UpperCAmelCase ) -> bool:
snake_case_ = [int(UpperCAmelCase ) for i in ip_va_address.split('.' ) if i.isdigit()]
return len(UpperCAmelCase ) == 4 and all(0 <= int(UpperCAmelCase ) <= 254 for octet in octets )
if __name__ == "__main__":
__UpperCamelCase = input().strip()
__UpperCamelCase = "valid" if is_ip_va_address_valid(ip) else "invalid"
print(F"""{ip} is a {valid_or_invalid} IP v4 address.""")
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__UpperCamelCase = None
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCamelCase = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
__UpperCamelCase = {
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
__UpperCamelCase = '''▁'''
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE_ = BarthezTokenizer
def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", **lowerCAmelCase__, ) -> List[str]:
# Mask token behave like a normal word, i.e. include the space before it
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else mask_token
super().__init__(
lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, **lowerCAmelCase__, )
snake_case_ = vocab_file
snake_case_ = False if not self.vocab_file else True
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case_ = [self.cls_token_id]
snake_case_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(lowerCAmelCase__):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
snake_case_ = os.path.join(
lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__):
copyfile(self.vocab_file, lowerCAmelCase__)
return (out_vocab_file,)
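
# Illustrative usage sketch (added; downloads tokenizer files from the Hub,
# model id taken from the map above):
tok = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
print(tok("Exemple de phrase.")["input_ids"])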
"""Invisible watermarking helper for Stable Diffusion XL image batches."""

import numpy as np
import torch
from imwatermark import WatermarkEncoder


# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b101100111110110010010000011110111011000110011110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()

        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        # [-1, 1] -> [0, 255], channels-last numpy for the encoder
        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        # back to channels-first tensors in [-1, 1]
        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
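
# Illustrative application to a dummy batch in [-1, 1] (added; requires the
# `invisible-watermark` package that provides WatermarkEncoder):
batch = torch.zeros(1, 3, 256, 256)
marked = StableDiffusionXLWatermarker().apply_watermark(batch)
print(marked.shape)  # torch.Size([1, 3, 256, 256])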
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]


if TYPE_CHECKING:
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
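
# Illustrative config instantiation (added; the overridden values are arbitrary):
config = YolosConfig()
small = YolosConfig(hidden_size=384, num_hidden_layers=6, num_attention_heads=6)
print(config.model_type, small.hidden_size)  # yolos 384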
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional

import pyarrow as pa
import pyarrow.json as paj

import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                field_type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=field_type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
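
# Illustrative entry point (added): this builder is normally reached through
# `datasets.load_dataset`; "data.jsonl" / "data.json" are hypothetical local files.
#
#   from datasets import load_dataset
#   ds = load_dataset("json", data_files="data.jsonl", split="train")
#   nested = load_dataset("json", data_files="data.json", field="rows")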
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient over 2..limit via a sieve and the product formula."""
    # Sieve of Eratosthenes over the odd numbers, plus 2.
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    # phi(n) = n * prod(1 - 1/p) over the primes p dividing n.
    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
def counting_sort(collection):
    """Pure implementation of the counting sort algorithm in Python."""
    # if the collection is empty, returns empty
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string):
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
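
# Illustrative checks (added; negative inputs work because the counts are
# offset by the collection minimum):
assert counting_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]
assert counting_sort([-2, -5, -45]) == [-45, -5, -2]
assert counting_sort_string("bca") == "abc"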
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase : Union[str, Any] = logging.get_logger(__name__)
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
lowercase : Optional[Any] = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
lowercase : Dict = [144, 192, 240]
lowercase : Dict = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
lowercase : Any = [96, 120, 144]
lowercase : int = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
lowercase : List[str] = [64, 80, 96]
lowercase : Any = [16, 16, 24, 48, 64, 80, 320]
lowercase : Tuple = 0.05
lowercase : List[Any] = 2.0
if mobilevit_name.startswith("""deeplabv3_""" ):
lowercase : Optional[Any] = 512
lowercase : Union[str, Any] = 16
lowercase : List[str] = 21
lowercase : Union[str, Any] = """pascal-voc-id2label.json"""
else:
lowercase : Union[str, Any] = 1_000
lowercase : Optional[int] = """imagenet-1k-id2label.json"""
lowercase : Tuple = """huggingface/label-files"""
lowercase : Dict = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" ) , """r""" ) )
lowercase : List[Any] = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
lowercase : Tuple = idalabel
lowercase : List[str] = {v: k for k, v in idalabel.items()}
return config
def rename_key(name: str, base_model: bool = False) -> str:
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")
    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")
    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")
    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
    if "expand_1x1" in name:
        name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
    if "conv_3x3" in name:
        name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
    if "reduce_1x1" in name:
        name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")
    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")
    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")
    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")
    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name
    return name
def convert_state_dict(orig_state_dict: dict, model, base_model: bool = False) -> dict:
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]
        if "qkv" in key:
            # The original checkpoint stores query/key/value as one fused tensor;
            # split it into the three separate projections the HF model expects.
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val
    return orig_state_dict
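
# --- Illustrative aside (not part of the original script) ---
# A minimal sketch of the fused-qkv split performed above, on a toy tensor.
# The value of `dim` here is made up for illustration; in the real conversion
# it comes from the model's attention head size.
def _demo_qkv_split():
    import torch

    dim = 4
    fused = torch.arange(3 * dim * 2, dtype=torch.float32).reshape(3 * dim, 2)
    query = fused[:dim, :]          # first `dim` rows
    key = fused[dim : dim * 2, :]   # middle `dim` rows
    value = fused[-dim:, :]         # last `dim` rows
    assert torch.equal(torch.cat([query, key, value]), fused)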
def prepare_img():
    # A COCO validation image, commonly used to sanity-check vision conversions.
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--mobilevit_name""",
default="""mobilevit_s""",
type=str,
help=(
"""Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"""
""" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."""
),
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 285
|
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Naive approach: check every 3-permutation of the array. O(n^3)."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Two-pointer approach on a sorted array. O(n^2) time, O(1) extra space."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
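
# --- Illustrative aside (not part of the original benchmark) ---
# Sanity check: the two implementations may return different triplets for the
# same target, but each returned triplet should have the requested sum.
def _demo_triplet_agreement():
    sample = [1, 2, 3, 4, 5]
    assert sum(triplet_sum1(sample, 9)) == 9
    assert sum(triplet_sum2(sample, 9)) == 9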
def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10_000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10_000)
    return (min(times1), min(times2))
if __name__ == "__main__":
from doctest import testmod
testmod()
times = solution_times()
print(F'''The time for naive implementation is {times[0]}.''')
print(F'''The time for optimized implementation is {times[1]}.''')
| 285
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"
class FNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True,
                 keep_accents=True, unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>",
                 cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Treat the mask token like a normal word so it can absorb the space before it.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space,
            keep_accents=keep_accents, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
            cls_token=cls_token, mask_token=mask_token, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
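
# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, assuming one of the checkpoints listed in
# PRETRAINED_VOCAB_FILES_MAP above is reachable:
#
#   tokenizer = FNetTokenizerFast.from_pretrained("google/fnet-base")
#   input_ids = tokenizer("Hello world")["input_ids"]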
| 34
|
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = 1.5
lowercase = int(factor * num_class_images )
lowercase = ClipClient(
url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=__SCREAMING_SNAKE_CASE , aesthetic_weight=0.1 )
os.makedirs(F'''{class_data_dir}/images''' , exist_ok=__SCREAMING_SNAKE_CASE )
if len(list(Path(F'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images:
return
while True:
lowercase = client.query(text=__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) >= factor * num_class_images or num_images > 1e4:
break
else:
lowercase = int(factor * num_images )
lowercase = ClipClient(
url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=__SCREAMING_SNAKE_CASE , aesthetic_weight=0.1 , )
lowercase = 0
lowercase = 0
lowercase = tqdm(desc='downloading real regularization images' , total=__SCREAMING_SNAKE_CASE )
with open(F'''{class_data_dir}/caption.txt''' , 'w' ) as fa, open(F'''{class_data_dir}/urls.txt''' , 'w' ) as fa, open(
F'''{class_data_dir}/images.txt''' , 'w' ) as fa:
while total < num_class_images:
lowercase = class_images[count]
count += 1
try:
lowercase = requests.get(images['url'] )
if img.status_code == 200:
lowercase = Image.open(BytesIO(img.content ) )
with open(F'''{class_data_dir}/images/{total}.jpg''' , 'wb' ) as f:
f.write(img.content )
fa.write(images['caption'] + '\n' )
fa.write(images['url'] + '\n' )
fa.write(F'''{class_data_dir}/images/{total}.jpg''' + '\n' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def UpperCAmelCase_ ( ):
lowercase = argparse.ArgumentParser('' , add_help=__SCREAMING_SNAKE_CASE )
parser.add_argument('--class_prompt' , help='text prompt to retrieve images' , required=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE )
parser.add_argument('--class_data_dir' , help='path to save images' , required=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE )
parser.add_argument('--num_class_images' , help='number of images to download' , default=200 , type=__SCREAMING_SNAKE_CASE )
return parser.parse_args()
if __name__ == "__main__":
UpperCAmelCase = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 195
| 0
|
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
return self
def __repr__( self):
return pformat(self.adj_list)
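
# --- Illustrative usage (not part of the original module) ---
# Minimal sketch of building a small directed graph with the class above.
if __name__ == "__main__":
    graph = GraphAdjacencyList()  # directed by default
    graph.add_edge(0, 1).add_edge(0, 2).add_edge(1, 2)  # add_edge returns self, so calls chain
    print(graph)  # __repr__ pretty-prints the adjacency dictionary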
| 365
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 97
| 0
|
def binary_count_setbits(a: int) -> int:
    """Return the number of set bits (1s) in the binary representation of `a`.

    >>> binary_count_setbits(25)  # 0b11001
    3
    """
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(a, float):
        raise TypeError("Input value must be a 'int' type")
    return bin(a).count("1")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 181
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 181
| 1
|
import inspect
import unittest

from transformers import MobileNetV2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation, MobileNetV2Model
    from transformers.models.mobilenet_v2.modeling_mobilenet_v2 import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV2ImageProcessor
class MobileNetV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetV2ModelTester:
    def __init__(self, parent, batch_size=13, num_channels=3, image_size=32, depth_multiplier=0.25,
                 depth_divisible_by=8, min_depth=8, expand_ratio=6, output_stride=32,
                 first_layer_is_expansion=True, finegrained_output=True, tf_padding=True, hidden_act="relu6",
                 last_hidden_size=1280, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True,
                 use_labels=True, num_labels=10, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV2Config(
            num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, depth_divisible_by=self.depth_divisible_by, min_depth=self.min_depth, expand_ratio=self.expand_ratio, output_stride=self.output_stride, first_layer_is_expansion=self.first_layer_is_expansion, finegrained_output=self.finegrained_output, hidden_act=self.hidden_act, tf_padding=self.tf_padding, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(
            result.pooler_output.shape,
            (self.batch_size, self.last_hidden_size),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileNetV2Model, MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetV2Model,
            "image-classification": MobileNetV2ForImageClassification,
            "image-segmentation": MobileNetV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV2ModelTester(self)
        self.config_tester = MobileNetV2ConfigTester(self, config_class=MobileNetV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetV2ForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetV2ImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
| 363
|
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
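
# --- Illustrative usage (not part of the original module) ---
# A minimal sketch; the class name above follows the diffusers dance-diffusion
# pipeline, and the checkpoint path below is a placeholder:
#
#   pipe = DanceDiffusionPipeline.from_pretrained("path/to/audio-diffusion-checkpoint")
#   output = pipe(batch_size=1, num_inference_steps=50, audio_length_in_s=4.0)
#   waveform = output.audios[0]  # numpy array of shape (channels, samples)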
| 21
| 0
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images from random numpy arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 182
|
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    """Wrap `method` so that accelerate's offload hook (if attached) runs first."""
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
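
# --- Illustrative usage (not part of the original module) ---
# A minimal sketch: decorating a non-forward method of a module so that
# accelerate's offload hook (attached as `_hf_hook`, if present) runs before
# the method body. `MyModel` and `encode` are hypothetical names.
#
#   class MyModel(torch.nn.Module):
#       @apply_forward_hook
#       def encode(self, x):
#           return x * 2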
| 182
| 1
|
import math
def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square using math.sqrt. Fast, but subject
    to floating-point rounding error for very large inputs."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using binary search.
    O(log n) time, O(1) extra space."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
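
# --- Illustrative aside (not part of the original module) ---
# Sanity check: both implementations should agree on small inputs, where
# floating-point error cannot occur.
def _demo_perfect_square_agreement() -> None:
    for value in range(100):
        assert perfect_square(value) == perfect_square_binary_search(value)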
if __name__ == "__main__":
import doctest
doctest.testmod()
| 200
|
import requests
_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetch a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="""<Your BBC News API key goes here>""")
| 200
| 1
|
import json
import sys
def format_json_to_md(input_json_file, output_md_file) -> None:
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 181
|
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory,
            streaming=streaming, num_proc=num_proc, **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs,
        )

    def read(self):
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode,
                verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs) -> int:
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json, [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)
        return written
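
# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, assuming an in-memory `datasets.Dataset`; "out.jsonl" is a
# placeholder path:
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"a": [1, 2, 3]})
#   JsonDatasetWriter(ds, "out.jsonl", lines=True).write()  # JSON Lines output
#   ds2 = JsonDatasetReader("out.jsonl").read()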
| 181
| 1
|
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """A read-only fsspec filesystem view over the files of a Hugging Face Hub dataset repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(
        self,
        path: str,
        mode: str = "rb",
        **kwargs,
    ):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
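# Usage sketch (hedged: assumes `repo_info` is the `DatasetInfo` returned by
# `huggingface_hub.HfApi().dataset_info(...)`; "squad" is just an example repo):
#
#   from huggingface_hub import HfApi
#   fs = HfFileSystem(repo_info=HfApi().dataset_info("squad"))
#   print(fs.ls(""))                  # top-level files and directories
#   with fs.open("README.md") as f:   # resolved via hf_hub_url + fsspec
#       print(f.read(200))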
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """
    Hubble parameter H(z) from the Friedmann equation:

        H(z) = H0 * sqrt(Omega_r*(1+z)^4 + Omega_m*(1+z)^3 + Omega_k*(1+z)^2 + Omega_Lambda)

    where the curvature density Omega_k is one minus the sum of the other
    relative densities.
    """
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3

    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
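# Sanity check: at redshift 0 the four density terms sum to exactly 1 (the
# curvature term absorbs the remainder), so the demo prints the input Hubble
# constant itself: 68.3.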
import numpy as np

# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models

if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu"))

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

    # Part 2 - Fitting the CNN to the images

    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')

    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )

    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    # `fit` accepts generators directly; `fit_generator` is deprecated in TF 2.x
    classifier.fit(training_set, steps_per_epoch=5, epochs=30, validation_data=test_set)

    classifier.save("cnn.h5")

    # Part 3 - Making new predictions

    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)

    # training_set.class_indices
    # The sigmoid output is a probability, so threshold it rather than
    # comparing the float to 0 or 1 exactly.
    if result[0][0] < 0.5:
        prediction = "Normal"
    else:
        prediction = "Abnormality detected"
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
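# This lazy-import pattern keeps importing the package cheap: sys.modules is
# swapped for a _LazyModule that only imports a submodule (here,
# tokenization_bartpho) on first attribute access, while the TYPE_CHECKING
# branch gives static type checkers the real imports.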
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size

            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size

            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_groupvit_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False
):
    """
    Copy/paste/tweak the model's weights to the Transformers design.
    """
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
    parser.add_argument(
        "--model_name",
        default="groupvit-gcc-yfcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
    )
    args = parser.parse_args()

    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
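# Example invocation (hedged: the paths below are placeholders, not real files):
#
#   python convert_groupvit_checkpoint.py \
#       --checkpoint_path /path/to/group_vit_checkpoint.pth \
#       --pytorch_dump_folder_path ./groupvit-gcc-yfcc \
#       --model_name groupvit-gcc-yfcc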
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    """Detect objects described by free-form text queries in an image."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image, candidate_labels=None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
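# Usage sketch (hedged: assumes the standard `zero-shot-object-detection`
# task alias and a public OWL-ViT checkpoint):
#
#   from transformers import pipeline
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote control"],
#   )
#   # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., ...}}, ...]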
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)

    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "[UNK]",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "[UNK]",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35389, 6672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__UpperCamelCase : int ={'input_ids': [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__UpperCamelCase,  # the encoding dict assigned on the long line above
            model_name="microsoft/xprophetnet-large-wiki100-cased",
            revision="1acad1643ddd54a44df6a1b797ada8373685d90e",
        )
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
'processing_mgp_str': ['MgpstrProcessor'],
'tokenization_mgp_str': ['MgpstrTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
'MgpstrModel',
'MgpstrPreTrainedModel',
'MgpstrForSceneTextRecognition',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
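# Usage sketch (hedged): instantiating a randomly initialised RoFormer model
# from this configuration.
#
#   from transformers import RoFormerConfig, RoFormerModel
#   config = RoFormerConfig(vocab_size=50000, hidden_size=768, num_hidden_layers=12)
#   model = RoFormerModel(config)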
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """Answer open-ended questions about images."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
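# Usage sketch (hedged: assumes the `visual-question-answering` task alias and
# a ViLT checkpoint fine-tuned on VQA):
#
#   from transformers import pipeline
#   vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#   vqa(image="cats.jpg", question="How many cats are there?", top_k=2)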
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    """
    Depth-first search over the state space tree; a branch is pruned when the
    current path already exceeds max_sum or can no longer reach it.
    """
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
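# With nums = [3, 34, 4, 12, 5, 2] and max_sum = 9, the only qualifying
# subsets are 3 + 4 + 2 and 4 + 5, so the demo prints: [3, 4, 2] [4, 5]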
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """
    Returns the twin prime of ``number`` (i.e. ``number + 2``) if both are
    prime, and -1 otherwise.

    >>> twin_prime(3)
    5
    >>> twin_prime(4)
    -1
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty collection of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

_model_names = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"funnel-transformer/{name}": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" Funnel Transformer tokenizer, backed by the 🤗 *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        bos_token="<s>",
        eos_token="</s>",
        clean_text=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        wordpieces_prefix="##",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
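# Usage sketch (hedged):
#
#   from transformers import FunnelTokenizerFast
#   tokenizer = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
#   tokenizer("Hello world")["input_ids"]
#   # token_type_ids begin with cls_token_type_id == 2 for the <cls> token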
"""simple docstring"""
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

# Sieve out the composites so that `primes` holds all primes below NUM_PRIMES.
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """
    Return a set of integers corresponding to unique prime partitions of
    ``number_to_partition``. Each partition is represented as the product of
    its primes, e.g. the partition 7 + 3 is encoded as 7 * 3 = 21.
    """
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the smallest integer that can be written as the sum of primes
    in over ``number_unique_partitions`` unique ways."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
    print(f"{solution() = }")
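# The product encoding works because prime factorisation is unique: each
# multiset of primes summing to n corresponds to exactly one product, so
# counting distinct products counts distinct prime partitions. For the
# default of 5000 partitions this is the setup of Project Euler problem 77.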
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """
    Given the three coefficients of a quadratic equation a*x**2 + b*x + c,
    compute its two roots via the quadratic formula.
    """
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
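# For 5x^2 + 6x + 1 the discriminant is 36 - 20 = 16, so the demo prints:
# The solutions are: -0.2 and -1.0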
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_hidden_layers = num_stages

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@unittest.skip(reason="UperNet does not use inputs_embeds" )
def __lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not support input and output embeddings" )
def __lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not have a base model" )
def __lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not have a base model" )
def __lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image


@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowercase ( __lowerCamelCase):
"""simple docstring"""
A__ = ["""image_processor""", """tokenizer"""]
A__ = """BridgeTowerImageProcessor"""
A__ = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
super().__init__(__lowerCamelCase , __lowerCamelCase )
def __call__( self : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] = None , __lowerCamelCase : List[str] = True , __lowerCamelCase : Union[str, Any] = False , __lowerCamelCase : Tuple = None , __lowerCamelCase : int = None , __lowerCamelCase : int = 0 , __lowerCamelCase : List[str] = None , __lowerCamelCase : Any = None , __lowerCamelCase : Union[str, Any] = None , __lowerCamelCase : int = False , __lowerCamelCase : str = False , __lowerCamelCase : Any = False , __lowerCamelCase : Optional[int] = False , __lowerCamelCase : Dict = True , __lowerCamelCase : List[Any] = None , **__lowerCamelCase : List[str] , ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
# add pixel_values + pixel_mask
lowerCamelCase__ : Union[str, Any] = self.image_processor(
__lowerCamelCase , return_tensors=__lowerCamelCase , do_normalize=__lowerCamelCase , do_center_crop=__lowerCamelCase , **__lowerCamelCase )
encoding.update(__lowerCamelCase )
return encoding
def lowerCAmelCase ( self : Optional[int] , *__lowerCamelCase : Any , **__lowerCamelCase : Tuple ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def lowerCAmelCase ( self : List[Any] , *__lowerCamelCase : str , **__lowerCamelCase : Tuple ):
'''simple docstring'''
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@property
def lowerCAmelCase ( self : str ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.tokenizer.model_input_names
lowerCamelCase__ : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
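# A hedged usage sketch for a combined image+text processor of the shape defined
# above; the `BridgeTowerProcessor` class and the "BridgeTower/bridgetower-base"
# checkpoint name are assumptions about the public Hugging Face release.
from PIL import Image
from transformers import BridgeTowerProcessor
processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
image = Image.new("RGB", (288, 288), "white")  # placeholder input image
encoding = processor(image, "two cats sleeping", return_tensors="pt")
# Tokenizer outputs (input_ids, attention_mask) are merged with the image
# processor outputs (pixel_values, pixel_mask), as in __call__ above.
print(sorted(encoding.keys()))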
| 184
|
import sys
from collections import defaultdict
class A_ :
'''simple docstring'''
def __init__( self ):
lowercase = []
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
return self.node_position[vertex]
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
lowercase = pos
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case ):
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
lowercase = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
lowercase = 2 * start + 1
else:
lowercase = 2 * start + 2
if heap[smallest_child] < heap[start]:
lowercase , lowercase = heap[smallest_child], positions[smallest_child]
lowercase , lowercase = (
heap[start],
positions[start],
)
lowercase , lowercase = temp, tempa
lowercase = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , snake_case )
self.top_to_bottom(snake_case , snake_case , snake_case , snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case ):
lowercase = position[index]
while index != 0:
lowercase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
lowercase = heap[parent]
lowercase = position[parent]
self.set_position(position[parent] , snake_case )
else:
lowercase = val
lowercase = temp
self.set_position(snake_case , snake_case )
break
lowercase = parent
else:
lowercase = val
lowercase = temp
self.set_position(snake_case , 0 )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
lowercase = len(snake_case ) // 2 - 1
for i in range(snake_case , -1 , -1 ):
self.top_to_bottom(snake_case , snake_case , len(snake_case ) , snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
lowercase = positions[0]
lowercase = sys.maxsize
self.top_to_bottom(snake_case , 0 , len(snake_case ) , snake_case )
return temp
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
lowercase = Heap()
lowercase = [0] * len(__SCREAMING_SNAKE_CASE )
lowercase = [-1] * len(__SCREAMING_SNAKE_CASE ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
lowercase = [] # Heap of Distance of vertices from their neighboring vertex
lowercase = []
for vertex in range(len(__SCREAMING_SNAKE_CASE ) ):
distance_tv.append(sys.maxsize )
positions.append(__SCREAMING_SNAKE_CASE )
heap.node_position.append(__SCREAMING_SNAKE_CASE )
lowercase = []
lowercase = 1
lowercase = sys.maxsize
for neighbor, distance in adjacency_list[0]:
lowercase = 0
lowercase = distance
heap.heapify(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for _ in range(1 , len(__SCREAMING_SNAKE_CASE ) ):
lowercase = heap.delete_minimum(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
lowercase = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(__SCREAMING_SNAKE_CASE )]
):
lowercase = distance
heap.bottom_to_top(
__SCREAMING_SNAKE_CASE , heap.get_position(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prim's Algorithm --------- >
UpperCAmelCase = int(input('''Enter number of edges: ''').strip())
UpperCAmelCase = defaultdict(list)
for _ in range(edges_number):
UpperCAmelCase = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
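# A compact, self-contained sketch of the same idea (Prim's minimum spanning tree)
# using Python's heapq instead of the hand-rolled heap above. The example graph is
# illustrative and independent of the interactive input loop.
import heapq

def prim_mst(adj, start=0):
    """Return MST edges of a connected undirected graph given as
    {vertex: [(neighbor, weight), ...]}."""
    visited = {start}
    candidates = [(w, start, v) for v, w in adj[start]]
    heapq.heapify(candidates)
    mst = []
    while candidates and len(visited) < len(adj):
        w, u, v = heapq.heappop(candidates)
        if v in visited:
            continue
        visited.add(v)
        mst.append((u, v))
        for nxt, weight in adj[v]:
            if nxt not in visited:
                heapq.heappush(candidates, (weight, v, nxt))
    return mst

example = {0: [(1, 1), (2, 4)], 1: [(0, 1), (2, 2), (3, 3)], 2: [(0, 4), (1, 2)], 3: [(1, 3)]}
print(prim_mst(example))  # [(0, 1), (1, 2), (1, 3)]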
| 195
| 0
|
lowercase__ =[
'Audio',
'Array2D',
'Array3D',
'Array4D',
'Array5D',
'ClassLabel',
'Features',
'Sequence',
'Value',
'Image',
'Translation',
'TranslationVariableLanguages',
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 361
|
import os
import sys
import unittest
lowercase__ =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowercase__ =os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
lowercase__ =os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class UpperCamelCase__ ( unittest.TestCase ):
def lowerCAmelCase (self : List[Any] ):
__a : str = get_test_to_tester_mapping(snake_case_ )
__a : Tuple = get_test_to_tester_mapping(snake_case_ )
__a : Union[str, Any] = {'''BertModelTest''': '''BertModelTester'''}
__a : Tuple = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
self.assertEqual(get_test_info.to_json(snake_case_ ) , snake_case_ )
self.assertEqual(get_test_info.to_json(snake_case_ ) , snake_case_ )
def lowerCAmelCase (self : str ):
__a : Optional[int] = get_model_to_test_mapping(snake_case_ )
__a : Any = get_model_to_test_mapping(snake_case_ )
__a : List[Any] = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
__a : Dict = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
self.assertEqual(get_test_info.to_json(snake_case_ ) , snake_case_ )
self.assertEqual(get_test_info.to_json(snake_case_ ) , snake_case_ )
def lowerCAmelCase (self : int ):
__a : Any = get_model_to_tester_mapping(snake_case_ )
__a : List[str] = get_model_to_tester_mapping(snake_case_ )
__a : Any = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
__a : int = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
self.assertEqual(get_test_info.to_json(snake_case_ ) , snake_case_ )
self.assertEqual(get_test_info.to_json(snake_case_ ) , snake_case_ )
| 90
| 0
|
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name
def _preprocess_image( image ) -> torch.Tensor:
    '''simple docstring'''
    warnings.warn(
        """The preprocess method is deprecated and will be removed in a future version. Please"""
        """ use VaeImageProcessor.preprocess instead""" , FutureWarning , )
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    if isinstance(image[0] , PIL.Image.Image ):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image]
        image = np.concatenate(image , axis=0 )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image.transpose(0 , 3 , 1 , 2 )
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0] , torch.Tensor ):
        image = torch.cat(image , dim=0 )
    return image
def _preprocess_mask( mask ) -> torch.Tensor:
    '''simple docstring'''
    if isinstance(mask , torch.Tensor ):
        return mask
    elif isinstance(mask , PIL.Image.Image ):
        mask = [mask]
    if isinstance(mask[0] , PIL.Image.Image ):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("""L""" ).resize((w, h) , resample=PIL_INTERPOLATION["""nearest"""] ) )[None, :] for m in mask]
        mask = np.concatenate(mask , axis=0 )
        mask = mask.astype(np.float32 ) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask )
    elif isinstance(mask[0] , torch.Tensor ):
        mask = torch.cat(mask , dim=0 )
    return mask
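# A numpy-only sketch of the mask preprocessing above: round the spatial size down
# to a multiple of 32, rescale to [0, 1], and binarize at 0.5. The array is synthetic.
import numpy as np
h, w = 70, 45
h, w = h - h % 32, w - w % 32  # -> 64, 32
demo_mask = np.random.randint(0, 256, size=(1, 1, h, w)).astype(np.float32) / 255.0
demo_mask[demo_mask < 0.5] = 0
demo_mask[demo_mask >= 0.5] = 1
assert set(np.unique(demo_mask)) <= {0.0, 1.0}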
class SCREAMING_SNAKE_CASE_ ( _a ):
__lowerCAmelCase = 42
__lowerCAmelCase = 42
def __init__( self : Any , lowerCamelCase_ : Dict , lowerCamelCase_ : Dict ):
"""simple docstring"""
super().__init__()
self.register_modules(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
@torch.no_grad()
def __call__( self : Union[str, Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] = 250 , lowerCamelCase_ : Any = 0.0 , lowerCamelCase_ : Union[str, Any] = 10 , lowerCamelCase_ : Optional[int] = 10 , lowerCamelCase_ : Dict = None , lowerCamelCase_ : int = "pil" , lowerCamelCase_ : Any = True , ):
"""simple docstring"""
UpperCamelCase = image
UpperCamelCase = _preprocess_image(lowerCamelCase_ )
UpperCamelCase = original_image.to(device=self.device , dtype=self.unet.dtype )
UpperCamelCase = _preprocess_mask(lowerCamelCase_ )
UpperCamelCase = mask_image.to(device=self.device , dtype=self.unet.dtype )
UpperCamelCase = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and len(lowerCamelCase_ ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(lowerCamelCase_ )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
UpperCamelCase = original_image.shape
UpperCamelCase = randn_tensor(lowerCamelCase_ , generator=lowerCamelCase_ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , self.device )
UpperCamelCase = eta
UpperCamelCase = self.scheduler.timesteps[0] + 1
UpperCamelCase = generator[0] if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
UpperCamelCase = self.unet(lowerCamelCase_ , lowerCamelCase_ ).sample
# compute previous image: x_t -> x_t-1
UpperCamelCase = self.scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
UpperCamelCase = self.scheduler.undo_step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase = t
UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase = self.numpy_to_pil(lowerCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase_ )
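# A hedged usage sketch for a RePaint-style inpainting call; the diffusers names
# RePaintPipeline/RePaintScheduler and the "google/ddpm-ema-celebahq-256" checkpoint
# follow the library's documented example, and the placeholder images stand in for
# a real 256x256 photo and mask (mask convention may vary: values near 1 keep pixels).
import torch
from PIL import Image
from diffusers import RePaintPipeline, RePaintScheduler

scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
init_image = Image.new("RGB", (256, 256), "gray")  # placeholder inputs
mask_image = Image.new("L", (256, 256), 255)
output = pipe(
    image=init_image,
    mask_image=mask_image,
    num_inference_steps=250,
    eta=0.0,
    jump_length=10,  # RePaint resampling schedule, as in __call__ above
    jump_n_sample=10,
    generator=torch.Generator().manual_seed(0),
)
output.images[0].save("inpainted.png")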
| 343
|
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def UpperCamelCase_( lowerCamelCase_ ) -> bool:
_lowercase : int = int(number**0.5 )
return number == sq * sq
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> tuple[int, int]:
_lowercase : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
_lowercase : int = x_den * y_den * z_den
_lowercase : int = gcd(lowerCamelCase_ , lowerCamelCase_ )
top //= hcf
bottom //= hcf
return top, bottom
def UpperCamelCase_( lowerCamelCase_ = 35 ) -> int:
_lowercase : set = set()
_lowercase : int
_lowercase : Fraction = Fraction(0 )
_lowercase : tuple[int, int]
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
_lowercase : int = x_num * y_den + x_den * y_num
_lowercase : int = x_den * y_den
_lowercase : str = gcd(lowerCamelCase_ , lowerCamelCase_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_lowercase : List[Any] = add_three(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
unique_s.add(lowerCamelCase_ )
# n=2
_lowercase : Dict = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
_lowercase : List[Any] = x_den * x_den * y_den * y_den
if is_sq(lowerCamelCase_ ) and is_sq(lowerCamelCase_ ):
_lowercase : Tuple = int(sqrt(lowerCamelCase_ ) )
_lowercase : int = int(sqrt(lowerCamelCase_ ) )
_lowercase : Any = gcd(lowerCamelCase_ , lowerCamelCase_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_lowercase : Optional[int] = add_three(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
unique_s.add(lowerCamelCase_ )
# n=-1
_lowercase : Any = x_num * y_num
_lowercase : str = x_den * y_num + x_num * y_den
_lowercase : Any = gcd(lowerCamelCase_ , lowerCamelCase_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_lowercase : int = add_three(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
unique_s.add(lowerCamelCase_ )
# n=-2
_lowercase : str = x_num * x_num * y_num * y_num
_lowercase : Optional[Any] = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(lowerCamelCase_ ) and is_sq(lowerCamelCase_ ):
_lowercase : Tuple = int(sqrt(lowerCamelCase_ ) )
_lowercase : List[str] = int(sqrt(lowerCamelCase_ ) )
_lowercase : Union[str, Any] = gcd(lowerCamelCase_ , lowerCamelCase_ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_lowercase : Tuple = add_three(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
unique_s.add(lowerCamelCase_ )
for num, den in unique_s:
total += Fraction(lowerCamelCase_ , lowerCamelCase_ )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F"{solution() = }")
| 21
| 0
|
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[str]:
torch.manual_seed(0 )
A : str = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]:
A : List[Any] = self.dummy_uncond_unet
A : str = PNDMScheduler()
A : Union[str, Any] = PNDMPipeline(unet=_a , scheduler=_a )
pndm.to(_a )
pndm.set_progress_bar_config(disable=_a )
A : Tuple = torch.manual_seed(0 )
A : int = pndm(generator=_a , num_inference_steps=20 , output_type="numpy" ).images
A : str = torch.manual_seed(0 )
A : Optional[int] = pndm(generator=_a , num_inference_steps=20 , output_type="numpy" , return_dict=_a )[0]
A : Tuple = image[0, -3:, -3:, -1]
A : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A : List[str] = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]:
A : Optional[Any] = "google/ddpm-cifar10-32"
A : str = UNet2DModel.from_pretrained(_a )
A : Dict = PNDMScheduler()
A : int = PNDMPipeline(unet=_a , scheduler=_a )
pndm.to(_a )
pndm.set_progress_bar_config(disable=_a )
A : str = torch.manual_seed(0 )
A : Any = pndm(generator=_a , output_type="numpy" ).images
A : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A : List[Any] = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
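# A minimal end-to-end sketch mirroring the slow test above: pair the public
# "google/ddpm-cifar10-32" UNet with a fresh PNDM scheduler and sample one image.
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
pndm = PNDMPipeline(unet=unet, scheduler=PNDMScheduler())
image = pndm(generator=torch.manual_seed(0), num_inference_steps=20, output_type="numpy").images
print(image.shape)  # (1, 32, 32, 3), as asserted in the tests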
| 350
|
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
A : Any = set()
A : int = []
def parse_line(_lowerCamelCase ):
for line in fp:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A : Any = line.decode("UTF-8" )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(" " ):
# process a single warning and move it to `selected_warnings`.
if len(_lowerCamelCase ) > 0:
A : Union[str, Any] = "\n".join(_lowerCamelCase )
# Only keep the warnings specified in `targets`
if any(f""": {x}: """ in warning for x in targets ):
selected_warnings.add(_lowerCamelCase )
buffer.clear()
continue
else:
A : Union[str, Any] = line.strip()
buffer.append(_lowerCamelCase )
if from_gh:
for filename in os.listdir(_lowerCamelCase ):
A : Tuple = os.path.join(_lowerCamelCase , _lowerCamelCase )
if not os.path.isdir(_lowerCamelCase ):
# read the file
if filename != "warnings.txt":
continue
with open(_lowerCamelCase ) as fp:
parse_line(_lowerCamelCase )
else:
try:
with zipfile.ZipFile(_lowerCamelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(_lowerCamelCase ):
# read the file
if filename != "warnings.txt":
continue
with z.open(_lowerCamelCase ) as fp:
parse_line(_lowerCamelCase )
except Exception:
logger.warning(
f"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" )
return selected_warnings
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
A : Tuple = set()
A : Union[str, Any] = [os.path.join(_lowerCamelCase , _lowerCamelCase ) for p in os.listdir(_lowerCamelCase ) if (p.endswith(".zip" ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(_lowerCamelCase , _lowerCamelCase ) )
return selected_warnings
if __name__ == "__main__":
def UpperCAmelCase ( _lowerCamelCase ):
return values.split("," )
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
__SCREAMING_SNAKE_CASE = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
__SCREAMING_SNAKE_CASE = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
__SCREAMING_SNAKE_CASE = extract_warnings(args.output_dir, args.targets)
__SCREAMING_SNAKE_CASE = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
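# A self-contained sketch of the buffering rule parse_line implements above:
# indented lines belong to the current warning, a non-indented line closes it,
# and only warnings matching ": <target>: " are kept. The log lines are synthetic.
def collect_warnings(lines, targets):
    selected, buffer = set(), []
    for line in lines + [""]:  # "" sentinel flushes the final buffer
        if line.startswith(" "):
            buffer.append(line.strip())
        elif buffer:
            warning = "\n".join(buffer)
            if any(f": {t}: " in warning for t in targets):
                selected.add(warning)
            buffer = []
    return selected

log = [
    "tests/test_a.py::test_one",
    "  a.py:1: DeprecationWarning: old API is deprecated",
    "tests/test_a.py::test_two",
    "  b.py:9: UserWarning: harmless",
]
print(collect_warnings(log, ["DeprecationWarning"]))
# {'a.py:1: DeprecationWarning: old API is deprecated'}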
| 256
| 0
|
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def UpperCamelCase () -> Optional[int]:
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(lowercase_ ):
requests.request("""GET""" , """https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )
@pytest.mark.integration
def UpperCamelCase () -> List[Any]:
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" , """https://huggingface.co""" )
def UpperCamelCase () -> Any:
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(lowercase_ ):
http_head("""https://huggingface.co""" )
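# A small sketch of the failure mode these tests simulate: a request with a short
# timeout either succeeds or raises a requests exception that callers should handle.
import requests

def reachable(url, timeout=1.0):
    try:
        requests.head(url, timeout=timeout)
        return True
    except (requests.exceptions.ConnectTimeout, requests.exceptions.ConnectionError):
        return False  # behave as if offline, mirroring OfflineSimulationMode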
| 192
|
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
A_ : List[str] = {
'n_samples': 64,
'horizon': 32,
'num_inference_steps': 20,
'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network
'scale_grad_by_std': True,
'scale': 0.1,
'eta': 0.0,
't_grad_cutoff': 2,
'device': 'cpu',
}
if __name__ == "__main__":
A_ : Optional[int] = 'hopper-medium-v2'
A_ : List[Any] = gym.make(env_name)
A_ : str = ValueGuidedRLPipeline.from_pretrained(
'bglick13/hopper-medium-v2-value-function-hor32',
env=env,
)
env.seed(0)
A_ : List[Any] = env.reset()
A_ : Optional[int] = 0
A_ : str = 0
A_ : Optional[Any] = 1000
A_ : Union[str, Any] = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
A_ : Tuple = pipeline(obs, planning_horizon=32)
# execute action in environment
A_ , A_ , A_ , A_ : Dict = env.step(denorm_actions)
A_ : List[str] = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
f'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
f''' {total_score}'''
)
# save observations for rendering
rollout.append(next_observation.copy())
A_ : int = next_observation
except KeyboardInterrupt:
pass
print(f'''Total reward: {total_reward}''')
| 192
| 1
|
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase( lowerCamelCase , unittest.TestCase ):
# TODO: is there an appropriate internal test set?
lowercase__ = 'ssube/stable-diffusion-x4-upscaler-onnx'
def UpperCAmelCase ( self , __a=0) -> Any:
'''simple docstring'''
_UpperCamelCase = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(__a))
_UpperCamelCase = torch.manual_seed(__a)
_UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
pipe.set_progress_bar_config(disable=__a)
_UpperCamelCase = self.get_dummy_inputs()
_UpperCamelCase = pipe(**__a).images
_UpperCamelCase = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 5_12, 5_12, 3)
_UpperCamelCase = np.array(
[0.697_4782, 0.6890_2093, 0.7013_5885, 0.758_3618, 0.780_4545, 0.785_4912, 0.7866_7426, 0.7874_3863, 0.7807_0223])
assert np.abs(image_slice - expected_slice).max() < 1e-1
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
_UpperCamelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__a)
pipe.set_progress_bar_config(disable=__a)
_UpperCamelCase = self.get_dummy_inputs()
_UpperCamelCase = pipe(**__a).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_UpperCamelCase = np.array(
[0.689_8892, 0.5924_0556, 0.5249_9527, 0.5886_6215, 0.5225_8235, 0.5257_2715, 0.6241_4473, 0.617_4387, 0.621_4964])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
_UpperCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=__a)
_UpperCamelCase = self.get_dummy_inputs()
_UpperCamelCase = pipe(**__a).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_UpperCamelCase = np.array(
[0.765_9278, 0.7643_7664, 0.7557_9107, 0.769_1116, 0.7766_6986, 0.772_7672, 0.775_8664, 0.781_2226, 0.7694_2515])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
_UpperCamelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=__a)
_UpperCamelCase = self.get_dummy_inputs()
_UpperCamelCase = pipe(**__a).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_UpperCamelCase = np.array(
[0.697_4782, 0.6890_2093, 0.7013_5885, 0.758_3618, 0.780_4545, 0.785_4912, 0.7866_7426, 0.7874_3863, 0.7807_0223])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
_UpperCamelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=__a)
_UpperCamelCase = self.get_dummy_inputs()
_UpperCamelCase = pipe(**__a).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_UpperCamelCase = np.array(
[0.7742_4496, 0.77_3601, 0.764_5288, 0.776_9598, 0.777_2739, 0.773_8688, 0.7818_7233, 0.7787_9584, 0.76_7043])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase( unittest.TestCase ):
@property
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = ort.SessionOptions()
_UpperCamelCase = False
return options
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''')
_UpperCamelCase = init_image.resize((1_28, 1_28))
# using the PNDM scheduler by default
_UpperCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__a)
_UpperCamelCase = '''A fantasy landscape, trending on artstation'''
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = pipe(
prompt=__a , image=__a , guidance_scale=7.5 , num_inference_steps=10 , generator=__a , output_type='''np''' , )
_UpperCamelCase = output.images
_UpperCamelCase = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 5_12, 3)
_UpperCamelCase = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''')
_UpperCamelCase = init_image.resize((1_28, 1_28))
_UpperCamelCase = LMSDiscreteScheduler.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , subfolder='''scheduler''')
_UpperCamelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , scheduler=__a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__a)
_UpperCamelCase = '''A fantasy landscape, trending on artstation'''
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = pipe(
prompt=__a , image=__a , guidance_scale=7.5 , num_inference_steps=20 , generator=__a , output_type='''np''' , )
_UpperCamelCase = output.images
_UpperCamelCase = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 5_12, 3)
_UpperCamelCase = np.array(
[0.5017_3753, 0.5022_3356, 0.50_2039, 0.5023_3036, 0.502_3725, 0.502_2601, 0.501_8758, 0.5023_4085, 0.5024_1566])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 362
|
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCAmelCase:
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=False , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = scope
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices)
_UpperCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Any:
'''simple docstring'''
_UpperCamelCase = BioGptModel(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , attention_mask=__a)
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = BioGptForCausalLM(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , *__a) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = BioGptModel(config=__a)
model.to(__a)
model.eval()
# create attention mask
_UpperCamelCase = torch.ones(input_ids.shape , dtype=torch.long , device=__a)
_UpperCamelCase = self.seq_length // 2
_UpperCamelCase = 0
# first forward pass
_UpperCamelCase , _UpperCamelCase = model(__a , attention_mask=__a).to_tuple()
# create hypothetical next token and extend to next_input_ids
_UpperCamelCase = ids_tensor((self.batch_size, 1) , config.vocab_size)
# change a random masked slice from input_ids
_UpperCamelCase = ids_tensor((1,) , __a).item() + 1
_UpperCamelCase = ids_tensor((self.batch_size, 1) , config.vocab_size).squeeze(-1)
_UpperCamelCase = random_other_next_tokens
# append to next input_ids and attn_mask
_UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1)
_UpperCamelCase = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__a)] , dim=1 , )
# get two different outputs
_UpperCamelCase = model(__a , attention_mask=__a)['''last_hidden_state''']
_UpperCamelCase = model(__a , past_key_values=__a , attention_mask=__a)['''last_hidden_state''']
# select random slice
_UpperCamelCase = ids_tensor((1,) , output_from_past.shape[-1]).item()
_UpperCamelCase = output_from_no_past[:, -1, random_slice_idx].detach()
_UpperCamelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__a , __a , atol=1e-3))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , *__a) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = BioGptModel(config=__a).to(__a).eval()
_UpperCamelCase = torch.ones(input_ids.shape , dtype=torch.long , device=__a)
# first forward pass
_UpperCamelCase = model(__a , attention_mask=__a , use_cache=__a)
_UpperCamelCase , _UpperCamelCase = outputs.to_tuple()
# create hypothetical multiple next tokens and extend to next_input_ids
_UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size)
_UpperCamelCase = ids_tensor((self.batch_size, 3) , 2)
# append to next input_ids and attention_mask
_UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1)
_UpperCamelCase = torch.cat([attention_mask, next_attn_mask] , dim=-1)
_UpperCamelCase = model(__a , attention_mask=__a)['''last_hidden_state''']
_UpperCamelCase = model(__a , attention_mask=__a , past_key_values=__a)[
'''last_hidden_state'''
]
# select random slice
_UpperCamelCase = ids_tensor((1,) , output_from_past.shape[-1]).item()
_UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
_UpperCamelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__a , __a , atol=1e-3))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , *__a , __a=False) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = BioGptForCausalLM(__a)
model.to(__a)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
_UpperCamelCase = model(__a , labels=__a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def UpperCAmelCase ( self , __a , *__a) -> Any:
'''simple docstring'''
_UpperCamelCase = BioGptModel(__a)
_UpperCamelCase = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std) , 0.001)
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0) , 0.01)
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , *__a) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.num_labels
_UpperCamelCase = BioGptForTokenClassification(__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) = config_and_inputs
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase__ = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
lowercase__ = (BioGptForCausalLM,) if is_torch_available() else ()
lowercase__ = (
{
'feature-extraction': BioGptModel,
'text-classification': BioGptForSequenceClassification,
'text-generation': BioGptForCausalLM,
'token-classification': BioGptForTokenClassification,
'zero-shot': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase__ = False
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = BioGptModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=__a , hidden_size=37)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCamelCase = type
self.model_tester.create_and_check_model(*__a)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__a)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*__a , gradient_checkpointing=__a)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__a)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*__a)
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*__a)
@slow
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(__a)
_UpperCamelCase = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
_UpperCamelCase = '''left'''
# Define PAD Token = EOS Token = 50256
_UpperCamelCase = tokenizer.eos_token
_UpperCamelCase = model.config.eos_token_id
# use different length sentences to test batching
_UpperCamelCase = [
'''Hello, my dog is a little''',
'''Today, I''',
]
_UpperCamelCase = tokenizer(__a , return_tensors='''pt''' , padding=__a)
_UpperCamelCase = inputs['''input_ids'''].to(__a)
_UpperCamelCase = model.generate(
input_ids=__a , attention_mask=inputs['''attention_mask'''].to(__a) , )
_UpperCamelCase = tokenizer(sentences[0] , return_tensors='''pt''').input_ids.to(__a)
_UpperCamelCase = model.generate(input_ids=__a)
_UpperCamelCase = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
_UpperCamelCase = tokenizer(sentences[1] , return_tensors='''pt''').input_ids.to(__a)
_UpperCamelCase = model.generate(input_ids=__a , max_length=model.config.max_length - num_paddings)
_UpperCamelCase = tokenizer.batch_decode(__a , skip_special_tokens=__a)
_UpperCamelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__a)
_UpperCamelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=__a)
_UpperCamelCase = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(__a , __a)
self.assertListEqual(__a , [non_padded_sentence, padded_sentence])
@slow
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = BioGptModel.from_pretrained(__a)
self.assertIsNotNone(__a)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = 3
_UpperCamelCase = input_dict['''input_ids''']
_UpperCamelCase = input_ids.ne(1).to(__a)
_UpperCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
_UpperCamelCase = BioGptForSequenceClassification(__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , attention_mask=__a , labels=__a)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = 3
_UpperCamelCase = '''multi_label_classification'''
_UpperCamelCase = input_dict['''input_ids''']
_UpperCamelCase = input_ids.ne(1).to(__a)
_UpperCamelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
_UpperCamelCase = BioGptForSequenceClassification(__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , attention_mask=__a , labels=__a)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class _UpperCAmelCase( unittest.TestCase ):
@slow
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
_UpperCamelCase = torch.tensor([[2, 48_05, 9, 6_56, 21]])
_UpperCamelCase = model(__a)[0]
_UpperCamelCase = 4_23_84
_UpperCamelCase = torch.Size((1, 5, vocab_size))
self.assertEqual(output.shape , __a)
_UpperCamelCase = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4))
@slow
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
_UpperCamelCase = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(__a)
torch.manual_seed(0)
_UpperCamelCase = tokenizer('''COVID-19 is''' , return_tensors='''pt''').to(__a)
_UpperCamelCase = model.generate(
**__a , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=__a , )
_UpperCamelCase = tokenizer.decode(output_ids[0] , skip_special_tokens=__a)
_UpperCamelCase = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(__a , __a)
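# A condensed, CPU-runnable version of the slow generation test above, using the
# same public "microsoft/biogpt" checkpoint and generate() arguments.
import torch
from transformers import BioGptForCausalLM, BioGptTokenizer

tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
torch.manual_seed(0)
inputs = tokenizer("COVID-19 is", return_tensors="pt")
output_ids = model.generate(**inputs, min_length=25, max_length=50, num_beams=5, early_stopping=True)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))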
| 100
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ =logging.get_logger(__name__)
lowercase__ ={
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class UpperCamelCase__ ( __lowercase ):
_SCREAMING_SNAKE_CASE : List[Any] = "roformer"
def __init__(self : Optional[Any] , snake_case_ : Optional[int]=5_0_0_0_0 , snake_case_ : Tuple=None , snake_case_ : Optional[int]=7_6_8 , snake_case_ : Tuple=1_2 , snake_case_ : Any=1_2 , snake_case_ : str=3_0_7_2 , snake_case_ : Any="gelu" , snake_case_ : Tuple=0.1 , snake_case_ : Dict=0.1 , snake_case_ : List[Any]=1_5_3_6 , snake_case_ : int=2 , snake_case_ : Any=0.02 , snake_case_ : Dict=1E-12 , snake_case_ : Optional[int]=0 , snake_case_ : List[Any]=False , snake_case_ : str=True , **snake_case_ : Optional[Any] , ):
super().__init__(pad_token_id=snake_case_ , **snake_case_ )
__a : Any = vocab_size
__a : Optional[int] = hidden_size if embedding_size is None else embedding_size
__a : Tuple = hidden_size
__a : Optional[Any] = num_hidden_layers
__a : Optional[int] = num_attention_heads
__a : List[Any] = hidden_act
__a : List[Any] = intermediate_size
__a : str = hidden_dropout_prob
__a : Union[str, Any] = attention_probs_dropout_prob
__a : Tuple = max_position_embeddings
__a : Optional[int] = type_vocab_size
__a : Optional[Any] = initializer_range
__a : Dict = layer_norm_eps
__a : List[Any] = rotary_value
__a : int = use_cache
class UpperCamelCase__ ( __lowercase ):
@property
def lowerCAmelCase (self : Tuple ):
if self.task == "multiple-choice":
__a : Optional[int] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__a : List[str] = {0: '''batch''', 1: '''sequence'''}
__a : Dict = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
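# A brief sketch of how a config like the one above is typically consumed, assuming
# the public transformers names RoFormerConfig and RoFormerModel.
from transformers import RoFormerConfig, RoFormerModel

config = RoFormerConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4, intermediate_size=512)
model = RoFormerModel(config)  # randomly initialized from the config
print(model.config.rotary_value)  # False by default, per __init__ above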
| 216
|
from __future__ import annotations
def __UpperCamelCase ( lowerCAmelCase__ : list[float] , lowerCAmelCase__ : list[float] ):
__a : Dict = sorted(numsa + numsa )
__a , __a : Optional[Any] = divmod(len(lowerCAmelCase__ ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase__ =[float(x) for x in input('Enter the elements of first array: ').split()]
lowercase__ =[float(x) for x in input('Enter the elements of second array: ').split()]
print(F"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
| 216
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
a_ : Any = None
a_ : Optional[Any] = logging.get_logger(__name__)
a_ : Optional[int] = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
a_ : List[str] = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
a_ : str = {
'facebook/mbart-large-en-ro': 10_24,
'facebook/mbart-large-cc25': 10_24,
}
# fmt: off
a_ : Tuple = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class _snake_case ( A__ ):
_lowercase : List[str] = VOCAB_FILES_NAMES
_lowercase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : int = PRETRAINED_VOCAB_FILES_MAP
_lowercase : str = ['''input_ids''', '''attention_mask''']
_lowercase : Any = MBartTokenizer
_lowercase : List[int] = []
_lowercase : List[int] = []
def __init__( self , a=None , a=None , a="<s>" , a="</s>" , a="</s>" , a="<s>" , a="<unk>" , a="<pad>" , a="<mask>" , a=None , a=None , a=None , **a , ) -> Optional[Any]:
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE = AddedToken(a , lstrip=a , rstrip=a) if isinstance(a , a) else mask_token
super().__init__(
vocab_file=a , tokenizer_file=a , bos_token=a , eos_token=a , sep_token=a , cls_token=a , unk_token=a , pad_token=a , mask_token=a , src_lang=a , tgt_lang=a , additional_special_tokens=a , **a , )
SCREAMING_SNAKE_CASE = vocab_file
SCREAMING_SNAKE_CASE = False if not self.vocab_file else True
SCREAMING_SNAKE_CASE = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens])
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens})
SCREAMING_SNAKE_CASE = {
lang_code: self.convert_tokens_to_ids(a) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
SCREAMING_SNAKE_CASE = src_lang if src_lang is not None else 'en_XX'
SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(self._src_lang)
SCREAMING_SNAKE_CASE = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def SCREAMING_SNAKE_CASE__ ( self) -> str:
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE__ ( self , a) -> None:
SCREAMING_SNAKE_CASE = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def SCREAMING_SNAKE_CASE__ ( self , a , a = None) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE__ ( self , a , a = None) -> List[int]:
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , **a) -> Optional[Any]:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
SCREAMING_SNAKE_CASE = src_lang
SCREAMING_SNAKE_CASE = self(a , add_special_tokens=a , return_tensors=a , **a)
SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(a)
SCREAMING_SNAKE_CASE = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE__ ( self , a , a = "en_XX" , a = None , a = "ro_RO" , **a , ) -> BatchEncoding:
SCREAMING_SNAKE_CASE = src_lang
SCREAMING_SNAKE_CASE = tgt_lang
return super().prepare_seq2seq_batch(a , a , **a)
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
return self.set_src_lang_special_tokens(self.src_lang)
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def SCREAMING_SNAKE_CASE__ ( self , a) -> None:
SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(a)
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code]
SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.prefix_tokens)
SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.suffix_tokens)
SCREAMING_SNAKE_CASE = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def SCREAMING_SNAKE_CASE__ ( self , a) -> None:
SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(a)
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code]
SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.prefix_tokens)
SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(self.suffix_tokens)
SCREAMING_SNAKE_CASE = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def SCREAMING_SNAKE_CASE__ ( self , a , a = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(a):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''')
return
SCREAMING_SNAKE_CASE = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(a):
copyfile(self.vocab_file , a)
return (out_vocab_file,)
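# Usage sketch (added for illustration; the class and checkpoint names below are
# assumptions — the logic above matches the mBART fast tokenizer, which wraps
# each sequence as `X </s> src_lang_code`):
# from transformers import MBartTokenizerFast
# tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
# batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# labels = tok(text_target="Şeful ONU declară că nu există o soluţie militară în Siria", return_tensors="pt")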
| 327
|
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class _snake_case ( unittest.TestCase ):
_lowercase : List[Any] = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_lowercase : int = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def SCREAMING_SNAKE_CASE__ ( self , a , a , a) -> Any:
SCREAMING_SNAKE_CASE = TextaTextGenerationPipeline(model=a , tokenizer=a)
return generator, ["Something to write", "Something else"]
def SCREAMING_SNAKE_CASE__ ( self , a , a) -> Any:
SCREAMING_SNAKE_CASE = generator('Something there')
self.assertEqual(a , [{'generated_text': ANY(a)}])
# These are encoder-decoder models; they don't simply append to the incoming string
self.assertFalse(outputs[0]['generated_text'].startswith('Something there'))
SCREAMING_SNAKE_CASE = generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=a)
self.assertEqual(
a , [
[{'generated_text': ANY(a)}, {'generated_text': ANY(a)}],
[{'generated_text': ANY(a)}, {'generated_text': ANY(a)}],
] , )
SCREAMING_SNAKE_CASE = generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=a)
self.assertEqual(
a , [
[{'generated_text': ANY(a)}, {'generated_text': ANY(a)}],
[{'generated_text': ANY(a)}, {'generated_text': ANY(a)}],
] , )
with self.assertRaises(a):
generator(4)
@require_torch
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='pt')
# do_sample=False necessary for reproducibility
SCREAMING_SNAKE_CASE = generator('Something there' , do_sample=a)
self.assertEqual(a , [{'generated_text': ''}])
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = generator(
'Something there' , num_return_sequences=a , num_beams=a , )
SCREAMING_SNAKE_CASE = [
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': ''},
]
self.assertEqual(a , a)
SCREAMING_SNAKE_CASE = generator('This is a test' , do_sample=a , num_return_sequences=2 , return_tensors=a)
self.assertEqual(
a , [
{'generated_token_ids': ANY(torch.Tensor)},
{'generated_token_ids': ANY(torch.Tensor)},
] , )
SCREAMING_SNAKE_CASE = generator.model.config.eos_token_id
SCREAMING_SNAKE_CASE = '<pad>'
SCREAMING_SNAKE_CASE = generator(
['This is a test', 'This is a second test'] , do_sample=a , num_return_sequences=2 , batch_size=2 , return_tensors=a , )
self.assertEqual(
a , [
[
{'generated_token_ids': ANY(torch.Tensor)},
{'generated_token_ids': ANY(torch.Tensor)},
],
[
{'generated_token_ids': ANY(torch.Tensor)},
{'generated_token_ids': ANY(torch.Tensor)},
],
] , )
@require_tf
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
SCREAMING_SNAKE_CASE = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='tf')
# do_sample=False necessary for reproducibility
SCREAMING_SNAKE_CASE = generator('Something there' , do_sample=a)
self.assertEqual(a , [{'generated_text': ''}])
| 327
| 1
|
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

odd_composites = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]

def compute_nums(n: int) -> list[int]:
    if not isinstance(n, int):
        raise ValueError('n must be an integer')
    if n <= 0:
        raise ValueError('n must be >= 0')
    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rest = odd_composites[num] - 2 * i * i
            if is_prime(rest):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []

def solution() -> int:
    return compute_nums(1)[0]

if __name__ == "__main__":
    print(F"""{solution() = }""")
| 50
|
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCAmelCase_ : Any = get_tests_dir('fixtures')
lowerCAmelCase_ : Union[str, Any] = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
lowerCAmelCase_ : Dict = get_tests_dir('fixtures/dummy-config.json')
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self : Optional[int] ):
_a = 0
def UpperCamelCase__ ( self : str ):
_a = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ ( self : Tuple ):
_a = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ ( self : List[Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
_a = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load the feature extractor locally
_a = AutoFeatureExtractor.from_pretrained(__a ).to_dict()
config_dict.pop("feature_extractor_type" )
_a = WavaVecaFeatureExtractor(**__a )
# save in new folder
model_config.save_pretrained(__a )
config.save_pretrained(__a )
_a = AutoFeatureExtractor.from_pretrained(__a )
# make sure private variable is not incorrectly saved
_a = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ ( self : Tuple ):
_a = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ ( self : Union[str, Any] ):
with self.assertRaisesRegex(
__a , "bert-base is not a local folder and is not a valid model identifier" ):
_a = AutoFeatureExtractor.from_pretrained("bert-base" )
def UpperCamelCase__ ( self : Optional[Any] ):
with self.assertRaisesRegex(
__a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
_a = AutoFeatureExtractor.from_pretrained(__a , revision="aaaaaa" )
def UpperCamelCase__ ( self : List[Any] ):
with self.assertRaisesRegex(
__a , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
_a = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model" )
def UpperCamelCase__ ( self : List[Any] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__a ):
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__a ):
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__a )
_a = AutoFeatureExtractor.from_pretrained(__a , trust_remote_code=__a )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
def UpperCamelCase__ ( self : Any ):
try:
AutoConfig.register("custom" , __a )
AutoFeatureExtractor.register(__a , __a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__a ):
AutoFeatureExtractor.register(__a , __a )
# Now that the config is registered, it can be used as any other config with the auto-API
_a = CustomFeatureExtractor.from_pretrained(__a )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__a )
_a = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase__ ( self : Tuple ):
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =True
try:
AutoConfig.register("custom" , __a )
AutoFeatureExtractor.register(__a , __a )
# If remote code is not set, the default is to use local
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(not hasattr(__a , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 63
| 0
|
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
lowerCAmelCase = logging.getLogger(__name__)
def _lowerCamelCase( ) -> Optional[int]:
'''simple docstring'''
__lowercase= argparse.ArgumentParser(
description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' )
parser.add_argument(
'--dataset_name' , type=_a , default='wikitext' , help='Name of the training dataset. Explore datasets at: hf.co/datasets.' , )
parser.add_argument(
'--dataset_config' , type=_a , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' )
parser.add_argument(
'--tokenizer_name_or_path' , type=_a , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , )
parser.add_argument(
'--shard_size' , type=_a , default=1_0_0_0 , help='Number of entries to go in a single shard.' , )
parser.add_argument('--split' , type=_a , default='train' , choices=['train', 'test', 'validation'] )
parser.add_argument(
'--limit' , default=_a , type=_a , help='Limit the number of shards (used for debugging).' , )
parser.add_argument(
'--max_length' , type=_a , default=5_1_2 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
' sequence length that is a multiple of 8.' , )
parser.add_argument(
'--output_dir' , default='tf-tpu' , type=_a , help='Output directory where the TFRecord shards will be saved. If the'
' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'
' shards will be directly saved to a Google Cloud Storage bucket.' , )
__lowercase= parser.parse_args()
return args
def _lowerCamelCase( lowercase__ ) -> List[str]:
'''simple docstring'''
def fn(lowercase__ ):
return tokenizer(examples['text'] )
return fn
def _lowerCamelCase( lowercase__ ) -> Dict:
'''simple docstring'''
__lowercase= []
for i in range(len(tokenized_data['input_ids'] ) ):
__lowercase= {
"""input_ids""": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['input_ids'][i] ) ),
"""attention_mask""": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['attention_mask'][i] ) ),
}
__lowercase= tf.train.Features(feature=_a )
__lowercase= tf.train.Example(features=_a )
__lowercase= example.SerializeToString()
records.append(_a )
return records
def _lowerCamelCase( lowercase__ ) -> int:
'''simple docstring'''
__lowercase= datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
__lowercase= min(len(_a ) , args.limit )
__lowercase= dataset.select(range(_a ) )
print(F'Limiting the dataset to {args.limit} entries.' )
__lowercase= AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
__lowercase= os.path.join(args.output_dir , args.split )
if not os.path.exists(_a ):
os.makedirs(_a )
else:
__lowercase= os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
__lowercase= tokenize_function(_a )
__lowercase= dataset.map(_a , batched=_a , num_proc=4 , remove_columns=['text'] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(lowercase__ ):
# Concatenate all texts.
__lowercase= {k: sum(examples[k] , [] ) for k in examples.keys()}
__lowercase= len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
__lowercase= (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
__lowercase= {
k: [t[i : i + args.max_length] for i in range(0 , _a , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
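    # Worked example (added for illustration): with max_length=512 and a
    # concatenated stream of 1300 tokens, total_length is truncated to
    # (1300 // 512) * 512 = 1024, so group_texts yields two full 512-token
    # samples and drops the 276-token tail.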
__lowercase= dataset_tokenized.map(_a , batched=_a , batch_size=1_0_0_0 , num_proc=4 )
__lowercase= 0
__lowercase= 0
for shard in range(0 , len(_a ) , args.shard_size ):
__lowercase= grouped_dataset[shard : shard + args.shard_size]
__lowercase= len(dataset_snapshot['input_ids'] )
__lowercase= os.path.join(_a , F'dataset-{shard_count}-{records_containing}.tfrecord' )
__lowercase= get_serialized_examples(_a )
with tf.io.TFRecordWriter(_a ) as out_file:
for i in range(len(_a ) ):
__lowercase= serialized_examples[i]
out_file.write(_a )
print('Wrote file {} containing {} records'.format(_a , _a ) )
shard_count += 1
total_records += records_containing
with open(F'split-{args.split}-records-count.txt' , 'w' ) as f:
print(F'Total {args.split} records: {total_records}' , file=_a )
if __name__ == "__main__":
lowerCAmelCase = parse_args()
main(args)
| 365
|
from math import factorial, radians
def _lowerCamelCase( angle_in_degrees: float , accuracy: int = 1_8 , rounded_values_count: int = 1_0 ) -> float:
    '''simple docstring'''
    # Reduce the angle to the range [0, 360) degrees
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 304
| 0
|
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
A_ : Any = logging.get_logger(__name__)
@add_end_docstrings(_a )
class A_ ( _a ):
'''simple docstring'''
def __init__(self , *lowercase__ , **lowercase__ ) -> int:
super().__init__(*lowercase__ , **lowercase__ )
requires_backends(self , '''vision''' )
self.check_model_type(lowercase__ )
def __call__(self , lowercase__ , **lowercase__ ) -> Tuple:
return super().__call__(lowercase__ , **lowercase__ )
def lowerCAmelCase_ (self , **lowercase__ ) -> Tuple:
return {}, {}, {}
def lowerCAmelCase_ (self , lowercase__ ) -> List[Any]:
__UpperCAmelCase = load_image(lowercase__ )
__UpperCAmelCase = image.size
__UpperCAmelCase = self.image_processor(images=lowercase__ , return_tensors=self.framework )
return model_inputs
def lowerCAmelCase_ (self , lowercase__ ) -> str:
__UpperCAmelCase = self.model(**lowercase__ )
return model_outputs
def lowerCAmelCase_ (self , lowercase__ ) -> Any:
__UpperCAmelCase = model_outputs.predicted_depth
__UpperCAmelCase = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=lowercase__ )
__UpperCAmelCase = prediction.squeeze().cpu().numpy()
__UpperCAmelCase = (output * 255 / np.max(lowercase__ )).astype('''uint8''' )
__UpperCAmelCase = Image.fromarray(lowercase__ )
__UpperCAmelCase = {}
__UpperCAmelCase = predicted_depth
__UpperCAmelCase = depth
return output_dict
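# Usage sketch (added for illustration; the checkpoint name is an example, not
# taken from this file):
# from transformers import pipeline
# depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
# out = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
# out["depth"].save("depth.png")   # PIL image, depth rescaled to 0-255
# out["predicted_depth"].shape     # raw torch tensor of predicted depths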
| 333
|
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
A_ : Tuple = logging.get_logger(__name__)
class A_ ( _a ):
'''simple docstring'''
a__ = "linear"
a__ = "cosine"
a__ = "cosine_with_restarts"
a__ = "polynomial"
a__ = "constant"
a__ = "constant_with_warmup"
a__ = "piecewise_constant"
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> Tuple:
'''simple docstring'''
return LambdaLR(SCREAMING_SNAKE_CASE , lambda SCREAMING_SNAKE_CASE : 1 , last_epoch=SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> Union[str, Any]:
'''simple docstring'''
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1.0 , SCREAMING_SNAKE_CASE ) )
return 1.0
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
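# Standalone sketch (added for illustration) of the warmup pattern used above:
# LambdaLR multiplies the optimizer's base learning rate by whatever lr_lambda
# returns for the current step, so ramping the multiplier from 0 to 1 gives a
# linear warmup.
#
#     import torch
#     from torch.optim.lr_scheduler import LambdaLR
#
#     param = torch.nn.Parameter(torch.zeros(1))
#     optimizer = torch.optim.SGD([param], lr=1.0)
#     scheduler = LambdaLR(optimizer, lambda step: min(1.0, step / 4))
#     for _ in range(6):
#         optimizer.step()
#         scheduler.step()
#     print(optimizer.param_groups[0]["lr"])  # 1.0 once warmup has completed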
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase = {}
__UpperCAmelCase = step_rules.split(''',''' )
for rule_str in rule_list[:-1]:
__UpperCAmelCase , __UpperCAmelCase = rule_str.split(''':''' )
__UpperCAmelCase = int(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = float(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = value
__UpperCAmelCase = float(rule_list[-1] )
def create_rules_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
def rule_func(SCREAMING_SNAKE_CASE ) -> float:
__UpperCAmelCase = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(SCREAMING_SNAKE_CASE ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
__UpperCAmelCase = create_rules_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=-1 ) -> Optional[Any]:
'''simple docstring'''
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0.5 , SCREAMING_SNAKE_CASE = -1 ) -> int:
'''simple docstring'''
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(SCREAMING_SNAKE_CASE ) * 2.0 * progress )) )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = -1 ) -> Dict:
'''simple docstring'''
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(SCREAMING_SNAKE_CASE ) * progress) % 1.0) )) )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=1e-7 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=-1 ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = optimizer.defaults['''lr''']
if not (lr_init > lr_end):
raise ValueError(f'''lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})''' )
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
__UpperCAmelCase = lr_init - lr_end
__UpperCAmelCase = num_training_steps - num_warmup_steps
__UpperCAmelCase = 1 - (current_step - num_warmup_steps) / decay_steps
__UpperCAmelCase = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
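# Worked numbers (added for illustration): with lr_init=1.0, lr_end=0.0,
# power=1.0, num_warmup_steps=0 and num_training_steps=100, step 25 gives
# pct_remaining = 1 - 25/100 = 0.75, so the returned multiplier is 0.75 and the
# effective learning rate is 0.75 * lr_init.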
A_ : Optional[Any] = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 1.0 , SCREAMING_SNAKE_CASE = -1 , ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase = SchedulerType(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(SCREAMING_SNAKE_CASE , step_rules=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f'''{name} requires `num_warmup_steps`, please provide that argument.''' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f'''{name} requires `num_training_steps`, please provide that argument.''' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , num_training_steps=SCREAMING_SNAKE_CASE , num_cycles=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , num_training_steps=SCREAMING_SNAKE_CASE , power=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE , )
return schedule_func(
SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , num_training_steps=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
| 333
| 1
|
import math
def lowerCamelCase_ ( initial_intensity: float , angle: float ) -> float:
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError('The value of intensity cannot be negative' )
    # handling of angle values outside the allowed 0-360 degree range
    if angle < 0 or angle > 3_60:
        raise ValueError('In Malus Law, the angle is in the range 0-360 degrees' )
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="""malus_law""")
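# Worked example (added for illustration): Malus's law gives I = I0 * cos(theta)**2,
# so at theta = 60 degrees a beam of intensity 100 transmits 100 * 0.25 = 25.
assert round(lowerCamelCase_(100.0, 60.0), 6) == 25.0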
| 366
|
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""nielsr/canine-s""": 2048,
}
# Unicode defines 1,114,112 total “codepoints”
lowerCAmelCase_ = 111_4112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
lowerCAmelCase_ = 0
lowerCAmelCase_ = 0xE_000
lowerCAmelCase_ = 0xE_001
lowerCAmelCase_ = 0xE_002
lowerCAmelCase_ = 0xE_003
lowerCAmelCase_ = 0xE_004
# Maps special codepoints to human-readable names.
lowerCAmelCase_ = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
lowerCAmelCase_ = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Dict =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Dict , UpperCamelCase : int=chr(UpperCamelCase ) , UpperCamelCase : Union[str, Any]=chr(UpperCamelCase ) , UpperCamelCase : Any=chr(UpperCamelCase ) , UpperCamelCase : Union[str, Any]=chr(UpperCamelCase ) , UpperCamelCase : List[Any]=chr(UpperCamelCase ) , UpperCamelCase : List[str]=chr(UpperCamelCase ) , UpperCamelCase : int=False , UpperCamelCase : str=20_48 , **UpperCamelCase : List[str] , ):
'''simple docstring'''
_snake_case : Tuple = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else bos_token
_snake_case : Optional[Any] = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else eos_token
_snake_case : Any = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else sep_token
_snake_case : str = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else cls_token
_snake_case : Dict = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_snake_case : str = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else mask_token
super().__init__(
bos_token=UpperCamelCase , eos_token=UpperCamelCase , sep_token=UpperCamelCase , cls_token=UpperCamelCase , pad_token=UpperCamelCase , mask_token=UpperCamelCase , add_prefix_space=UpperCamelCase , model_max_length=UpperCamelCase , **UpperCamelCase , )
# Creates a mapping for looking up the IDs of special symbols.
_snake_case : Dict[str, int] = {}
for codepoint, name in SPECIAL_CODEPOINTS.items():
_snake_case : Tuple = codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
_snake_case : Dict[int, str] = {
codepoint: name for name, codepoint in self._special_codepoints.items()
}
_snake_case : str = UNICODE_VOCAB_SIZE
_snake_case : Optional[Any] = len(self._special_codepoints )
@property
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
return self._unicode_vocab_size
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : str ):
'''simple docstring'''
return list(UpperCamelCase )
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : str ):
'''simple docstring'''
try:
return ord(UpperCamelCase )
except TypeError:
raise ValueError(f"""invalid token: '{token}'""" )
def UpperCamelCase_ ( self : Dict , UpperCamelCase : int ):
'''simple docstring'''
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(UpperCamelCase )
except TypeError:
raise ValueError(f"""invalid id: {index}""" )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : List[Any] ):
'''simple docstring'''
return "".join(UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : Optional[Any] = [self.sep_token_id]
_snake_case : int = [self.cls_token_id]
_snake_case : Any = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None , UpperCamelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase , token_ids_a=UpperCamelCase , already_has_special_tokens=UpperCamelCase )
_snake_case : int = [1] + ([0] * len(UpperCamelCase )) + [1]
if token_ids_a is not None:
result += ([0] * len(UpperCamelCase )) + [1]
return result
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : List[Any] = [self.sep_token_id]
_snake_case : Dict = [self.cls_token_id]
_snake_case : Tuple = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def UpperCamelCase_ ( self : int , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
return ()
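    # Worked example (added for illustration): this tokenizer works at the
    # character level, so a token's id is simply its Unicode codepoint
    # (ord('h') == 104), while special ids such as 0xE000 map back to '[CLS]'
    # through the SPECIAL_CODEPOINTS table above.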
| 260
| 0
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A_ :int = logging.get_logger(__name__)
A_ :Union[str, Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
A_ :Dict = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
A_ :str = {
'''allenai/longformer-base-4096''': 4096,
'''allenai/longformer-large-4096''': 4096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def A ( ) -> List[Any]:
__UpperCamelCase : Tuple =(
list(range(ord('!' ) ,ord('~' ) + 1 ) ) + list(range(ord('¡' ) ,ord('¬' ) + 1 ) ) + list(range(ord('®' ) ,ord('ÿ' ) + 1 ) )
)
__UpperCamelCase : List[str] =bs[:]
__UpperCamelCase : Any =0
for b in range(2**8 ):
if b not in bs:
bs.append(_UpperCAmelCase )
cs.append(2**8 + n )
n += 1
__UpperCamelCase : Any =[chr(_UpperCAmelCase ) for n in cs]
return dict(zip(_UpperCAmelCase ,_UpperCAmelCase ) )
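# Worked example (added for illustration): printable bytes map to themselves,
# while the rest are shifted into unused codepoints starting at 256. The space
# byte 0x20 is the 33rd excluded byte, so it maps to chr(256 + 32) == 'Ġ', the
# leading-space marker seen in GPT-2-style byte-level vocabularies.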
def A ( a_ ) -> Dict:
__UpperCamelCase : Any =set()
__UpperCamelCase : List[str] =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__UpperCamelCase : Optional[int] =char
return pairs
class __A ( a ):
"""simple docstring"""
UpperCamelCase__ : Any =VOCAB_FILES_NAMES
UpperCamelCase__ : Any =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : Union[str, Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : str =["""input_ids""", """attention_mask"""]
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="replace" , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<pad>" , lowerCamelCase__="<mask>" , lowerCamelCase__=False , **lowerCamelCase__ , ):
"""simple docstring"""
__UpperCamelCase : Any =AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else bos_token
__UpperCamelCase : Union[str, Any] =AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else eos_token
__UpperCamelCase : List[str] =AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else sep_token
__UpperCamelCase : Tuple =AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else cls_token
__UpperCamelCase : Optional[Any] =AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else unk_token
__UpperCamelCase : Tuple =AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__UpperCamelCase : Tuple =AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
super().__init__(
errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , **lowerCamelCase__ , )
with open(lowerCamelCase__ , encoding='utf-8' ) as vocab_handle:
__UpperCamelCase : int =json.load(lowerCamelCase__ )
__UpperCamelCase : Tuple ={v: k for k, v in self.encoder.items()}
__UpperCamelCase : List[str] =errors # how to handle errors in decoding
__UpperCamelCase : Dict =bytes_to_unicode()
__UpperCamelCase : Union[str, Any] ={v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase__ , encoding='utf-8' ) as merges_handle:
__UpperCamelCase : List[Any] =merges_handle.read().split('\n' )[1:-1]
__UpperCamelCase : Optional[int] =[tuple(merge.split() ) for merge in bpe_merges]
__UpperCamelCase : Optional[int] =dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
__UpperCamelCase : Union[str, Any] ={}
__UpperCamelCase : Tuple =add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__UpperCamelCase : List[Any] =re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def __lowercase ( self ):
"""simple docstring"""
return len(self.encoder )
def __lowercase ( self ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __lowercase ( self , lowerCamelCase__ ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
__UpperCamelCase : Optional[int] =tuple(lowerCamelCase__ )
__UpperCamelCase : List[str] =get_pairs(lowerCamelCase__ )
if not pairs:
return token
while True:
__UpperCamelCase : Tuple =min(lowerCamelCase__ , key=lambda lowerCamelCase__ : self.bpe_ranks.get(lowerCamelCase__ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCamelCase , __UpperCamelCase : Union[str, Any] =bigram
__UpperCamelCase : int =[]
__UpperCamelCase : List[Any] =0
while i < len(lowerCamelCase__ ):
try:
__UpperCamelCase : Dict =word.index(lowerCamelCase__ , lowerCamelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__UpperCamelCase : str =j
if word[i] == first and i < len(lowerCamelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCamelCase : Optional[Any] =tuple(lowerCamelCase__ )
__UpperCamelCase : int =new_word
if len(lowerCamelCase__ ) == 1:
break
else:
__UpperCamelCase : Union[str, Any] =get_pairs(lowerCamelCase__ )
__UpperCamelCase : Optional[int] =' '.join(lowerCamelCase__ )
__UpperCamelCase : Dict =word
return word
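    # Worked example (added for illustration): for the token "hug", if the merge
    # table ranks ("h", "u") before ("hu", "g"), the loop above first rewrites
    # ("h", "u", "g") to ("hu", "g") and then to ("hug",), returning "hug" as a
    # single BPE unit.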
def __lowercase ( self , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] =[]
for token in re.findall(self.pat , lowerCamelCase__ ):
__UpperCamelCase : Optional[Any] =''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase__ ).split(' ' ) )
return bpe_tokens
def __lowercase ( self , lowerCamelCase__ ):
"""simple docstring"""
return self.encoder.get(lowerCamelCase__ , self.encoder.get(self.unk_token ) )
def __lowercase ( self , lowerCamelCase__ ):
"""simple docstring"""
return self.decoder.get(lowerCamelCase__ )
def __lowercase ( self , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : Dict =''.join(lowerCamelCase__ )
__UpperCamelCase : Optional[Any] =bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
"""simple docstring"""
if not os.path.isdir(lowerCamelCase__ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__UpperCamelCase : Dict =os.path.join(
lowerCamelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase : Tuple =os.path.join(
lowerCamelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(lowerCamelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase__ , ensure_ascii=lowerCamelCase__ ) + '\n' )
__UpperCamelCase : List[Any] =0
with open(lowerCamelCase__ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase__ : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!' )
__UpperCamelCase : Dict =token_index
writer.write(' '.join(lowerCamelCase__ ) + '\n' )
index += 1
return vocab_file, merge_file
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__UpperCamelCase : Optional[Any] =[self.cls_token_id]
__UpperCamelCase : Optional[Any] =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase__ )) + [1]
return [1] + ([0] * len(lowerCamelCase__ )) + [1, 1] + ([0] * len(lowerCamelCase__ )) + [1]
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
"""simple docstring"""
__UpperCamelCase : List[str] =[self.sep_token_id]
__UpperCamelCase : Any =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__=False , **lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : Any =kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase__ ) > 0 and not text[0].isspace()):
__UpperCamelCase : List[Any] =' ' + text
return (text, kwargs)
| 71
|
cache = {}

def _calculate(days: int, absent: int, late: int) -> int:
    '''simple docstring'''
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings

def solution(days: int = 30) -> int:
    '''simple docstring'''
    return _calculate(days, absent=0, late=0)

if __name__ == "__main__":
    print(solution())
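# Quick check (added for illustration): Project Euler 191 states that over a
# 4-day period there are exactly 43 prize strings.
assert solution(4) == 43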
| 339
| 0
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
lowerCamelCase : List[Any] = random.Random()
def _SCREAMING_SNAKE_CASE ( lowercase : Tuple , lowercase : Optional[int]=1.0 , lowercase : Optional[int]=None , lowercase : str=None ):
'''simple docstring'''
if rng is None:
lowerCamelCase_ = global_rng
lowerCamelCase_ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class A( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , A_ : Optional[Any] , A_ : Dict=7 , A_ : List[str]=400 , A_ : List[str]=2000 , A_ : str=2048 , A_ : Optional[Any]=128 , A_ : Tuple=1 , A_ : str=512 , A_ : Optional[int]=30 , A_ : int=44100 , ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = min_seq_length
lowerCamelCase_ = max_seq_length
lowerCamelCase_ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCamelCase_ = spectrogram_length
lowerCamelCase_ = feature_size
lowerCamelCase_ = num_audio_channels
lowerCamelCase_ = hop_length
lowerCamelCase_ = chunk_length
lowerCamelCase_ = sampling_rate
def a__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def a__ ( self : int , A_ : Tuple=False , A_ : int=False ) -> Dict:
"""simple docstring"""
def _flatten(A_ : Dict ):
return list(itertools.chain(*A_ ) )
if equal_length:
lowerCamelCase_ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowerCamelCase_ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCamelCase_ = [np.asarray(A_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class A( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = TvltFeatureExtractor
def a__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = TvltFeatureExtractionTester(self )
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(A_ , 'spectrogram_length' ) )
self.assertTrue(hasattr(A_ , 'feature_size' ) )
self.assertTrue(hasattr(A_ , 'num_audio_channels' ) )
self.assertTrue(hasattr(A_ , 'hop_length' ) )
self.assertTrue(hasattr(A_ , 'chunk_length' ) )
self.assertTrue(hasattr(A_ , 'sampling_rate' ) )
def a__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase_ = feat_extract_first.save_pretrained(A_ )[0]
check_json_file_has_correct_format(A_ )
lowerCamelCase_ = self.feature_extraction_class.from_pretrained(A_ )
lowerCamelCase_ = feat_extract_first.to_dict()
lowerCamelCase_ = feat_extract_second.to_dict()
lowerCamelCase_ = dict_first.pop('mel_filters' )
lowerCamelCase_ = dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def a__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase_ = os.path.join(A_ , 'feat_extract.json' )
feat_extract_first.to_json_file(A_ )
lowerCamelCase_ = self.feature_extraction_class.from_json_file(A_ )
lowerCamelCase_ = feat_extract_first.to_dict()
lowerCamelCase_ = feat_extract_second.to_dict()
lowerCamelCase_ = dict_first.pop('mel_filters' )
lowerCamelCase_ = dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def a__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase_ = [np.asarray(A_ ) for speech_input in speech_inputs]
# Test not batched input
lowerCamelCase_ = feature_extractor(np_speech_inputs[0] , return_tensors='np' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
lowerCamelCase_ = feature_extractor(A_ , return_tensors='np' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
lowerCamelCase_ = feature_extractor(
A_ , return_tensors='np' , sampling_rate=44100 , mask_audio=A_ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
lowerCamelCase_ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowerCamelCase_ = np.asarray(A_ )
lowerCamelCase_ = feature_extractor(A_ , return_tensors='np' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def a__ ( self : Union[str, Any] , A_ : str ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
lowerCamelCase_ = ds.sort('id' ).select(range(A_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = self._load_datasamples(1 )
lowerCamelCase_ = TvltFeatureExtractor()
lowerCamelCase_ = feature_extractor(A_ , return_tensors='pt' ).audio_values
self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
lowerCamelCase_ = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , A_ , atol=1E-4 ) )
| 208
|
def _SCREAMING_SNAKE_CASE ( bin_string : str ):
    '''simple docstring'''
    if not all(char in '01' for char in bin_string ):
        raise ValueError('Non-binary value was passed to the function' )
    if not bin_string:
        raise ValueError('Empty string was passed to the function' )
    oct_string = ''
    # Left-pad with zeros so the length is a multiple of 3
    while len(bin_string ) % 3 != 0:
        bin_string = '0' + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string ) )
        if index % 3 == 0
    ]
    # Each 3-bit group encodes one octal digit
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group ):
            oct_val += int(2 ** (2 - index) * int(val ) )
        oct_string += str(oct_val )
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
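# Worked example (added for illustration): '11010' is padded to '011010', whose
# 3-bit groups 011 and 010 encode the octal digits 3 and 2, so the function
# returns '32' (and indeed 0o32 == 0b11010 == 26).
assert _SCREAMING_SNAKE_CASE('11010') == '32'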
| 208
| 1
|
'''simple docstring'''
from math import pow, sqrt
def validate ( *values ) -> bool:
    '''simple docstring'''
    result = len(values ) > 0 and all(value > 0.0 for value in values )
    return result
def lowercase_ ( _lowercase , _lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_lowercase , _lowercase )
    else ValueError('''Input Error: Molar mass values must be greater than 0.''' )
)
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_lowercase , _lowercase , _lowercase )
else ValueError(
        '''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
)
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_lowercase , _lowercase , _lowercase )
else ValueError(
        '''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
)
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(_lowercase , _lowercase , _lowercase )
else ValueError(
        '''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
)
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(_lowercase , _lowercase , _lowercase )
else ValueError(
        '''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
)
| 318
|
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer
class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)
    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)
    @classmethod
    def from_config(cls, config):
        return cls(**config)
    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }
    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
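# A minimal usage sketch (commented out because it needs network access to the
# Hugging Face Hub; the "gpt2" checkpoint name is an assumption for illustration):
#
#   tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2", pad_token_id=50256, max_length=16)
#   batch = tokenizer(tf.constant(["hello world"]))
#   batch["input_ids"], batch["attention_mask"]  # int tensors padded to length 16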
| 318
| 1
|
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_2tuple(x):
    """Return x unchanged if it is already iterable, otherwise duplicate it into a 2-tuple."""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass
    def prepare_config_and_inputs(self):
        pass
    def get_pretrained_model_and_inputs(self):
        pass
    def check_model_from_pretrained_configs(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))
    def check_vision_text_dual_encoder_model(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_vision_text_dual_encoder_from_pretrained(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)
    def check_vision_text_output_attention(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True)
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
    def test_vision_text_dual_encoder_model(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs)
    def test_model_from_pretrained_configs(self):
        inputs = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs)
    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs)
    def test_save_load(self):
        inputs = self.prepare_config_and_inputs()
        self.check_save_load(**inputs)
    def test_vision_text_output_attention(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs)
    @slow
    def test_pretrained_model(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
        # just reinitialize it.
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def check_vision_text_output_attention(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True)
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )
        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
| 350
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 135
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    '''simple docstring'''
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ["text", ["text"]]
    outputs = ["text"]
    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")
    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f"This example is {label}" for label in labels], return_tensors="pt", padding="max_length",
        )
    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
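# A minimal usage sketch (hypothetical inputs; downloads the MNLI checkpoint above):
#
#   classifier = TextClassificationTool()
#   classifier("This is a super nice API!", labels=["positive", "negative"])
#   # -> "positive"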
| 91
|
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)
        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])
        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)
    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()
        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)
        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)
    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)
        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)
        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})
    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"
        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)
            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")
        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config
    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)
        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
| 91
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class TimesformerConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "timesformer"
    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
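# For illustration only (the defaults mirror a ViT-Base/16 layout over 8 frames):
#
#   config = TimesformerConfig(num_frames=16)
#   assert config.hidden_size == 768 and config.attention_type == "divided_space_time"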
| 192
|
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}
if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)
    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )
    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)
            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )
            # save observations for rendering
            rollout.append(next_observation.copy())
            obs = next_observation
    except KeyboardInterrupt:
        pass
    print(f"Total reward: {total_reward}")
| 192
| 1
|
"""simple docstring"""
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = '[PAD]'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '[PAD]')
        self.assertEqual(vocab_keys[1], '[CLS]')
        self.assertEqual(vocab_keys[-1], 'j')
        self.assertEqual(len(vocab_keys), 1012)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)
    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'[UNK]',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'[UNK]',
'.',
] , )
    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased')
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = 'Hello World!'
        original_tokenizer_encodings = [35389, 6672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = {'input_ids': [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__UpperCamelCase, model_name='microsoft/xprophetnet-large-wiki100-cased', revision='1acad1643ddd54a44df6a1b797ada8373685d90e', )
| 316
|
"""simple docstring"""
def different_signs(num1: int, num2: int) -> bool:
    """Return True iff num1 and num2 have opposite signs."""
    return num1 ^ num2 < 0
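# Quick sanity checks: the XOR of two ints is negative exactly when their sign bits differ.
assert different_signs(1, -1)
assert not different_signs(1, 1)
assert not different_signs(-80, -80)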
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
| 1
|
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    url = f'https://www.amazon.in/laptop/s?k={product}'
    header = {
        'User-Agent': (
            'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36'
            ' (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36'
        ),
        'Accept-Language': 'en-US, en;q=0.5',
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text, 'html.parser')
# Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
columns=[
'Product Title',
'Product Link',
'Current Price of the product',
'Product Rating',
'MRP of the product',
'Discount',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ):
        try:
            product_title = item.h2.text
            product_link = 'https://www.amazon.in/' + item.h2.a['href']
            product_price = item.find('span', attrs={'class': 'a-offscreen'}).text
            try:
                product_rating = item.find('span', attrs={'class': 'a-icon-alt'}).text
            except AttributeError:
                product_rating = 'Not available'
            try:
                product_mrp = (
                    '₹'
                    + item.find(
                        'span', attrs={'class': 'a-price a-text-price'}).text.split('₹')[1]
                )
            except AttributeError:
                product_mrp = ''
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip('₹').replace(',', ''))
                            - float(product_price.strip('₹').replace(',', ''))
                        )
                        / float(product_mrp.strip('₹').replace(',', ''))
                    )
                    * 100)
            except ValueError:
                discount = float('nan')
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        # Reset the price fields so a parse failure on the next item does not
        # silently carry this item's values forward; rows are appended by position.
        product_price = ' '
        product_mrp = ' '
return data_frame
if __name__ == "__main__":
UpperCamelCase : int = "headphones"
get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
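# Note: Amazon aggressively rate-limits scripted clients, so this scraper can
# legitimately return an empty frame; the CSS selectors above are best-effort
# and break whenever amazon.in changes its markup.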
| 366
|
"""simple docstring"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
UpperCamelCase : List[Any] = logging.get_logger(__name__)
UpperCamelCase : List[str] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase : List[Any] = {
"vocab_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
},
"merges_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
},
"tokenizer_file": {
"Salesforce/codegen-350M-mono": (
"https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
),
},
}
UpperCamelCase : List[Any] = {
"Salesforce/codegen-350M-mono": 2_0_4_8,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase=False , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
__UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , unk_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , )
if kwargs.pop('add_bos_token' , __UpperCAmelCase ):
__UpperCamelCase = kwargs.pop('name_or_path' , '' )
raise ValueError(
'Currenty GPT2\'s fast tokenizer does NOT support adding a BOS token.'
'Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n'
F'`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n'
F'`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n'
'This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.'
' so that the fast tokenizer works correctly.' )
__UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , __UpperCAmelCase ) != add_prefix_space:
__UpperCamelCase = getattr(__UpperCAmelCase , pre_tok_state.pop('type' ) )
__UpperCamelCase = add_prefix_space
__UpperCamelCase = pre_tok_class(**__UpperCAmelCase )
__UpperCamelCase = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=None, truncate_before_pattern=None, **kwargs, ):
        decoded_text = super().decode(
            token_ids=token_ids, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, )
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)
        return decoded_text
    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1
        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]
        prints = list(re.finditer('^print', completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]
        defs = list(re.finditer('^def', completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]
        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
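# A minimal sketch of truncation at decode time (checkpoint name from the maps
# above; the regex list is illustrative, not prescribed by this module):
#
#   tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
#   ids = tokenizer("def hello():\n    print('x')", return_tensors="np").input_ids[0]
#   tokenizer.decode(ids, truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])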
| 263
| 0
|
"""simple docstring"""
import argparse
CUSTOM_JS_FILE = 'docs/source/_static/js/custom.js'
def update_custom_js(version):
    """Update the stable version and the version table in the custom.js file."""
    with open(CUSTOM_JS_FILE, encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith('const stableVersion ='):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'
    # Then update the dictionary
    while not lines[index].startswith('const versionMapping = {'):
        index += 1
    # We go until the end
    while not lines[index].startswith('}'):
        index += 1
    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'
    with open(CUSTOM_JS_FILE, 'w', encoding='utf-8', newline='\n') as f:
        f.writelines(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--version', help='Release version.')
    args = parser.parse_args()
    update_custom_js(args.version)
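# Example invocation (the script filename and version number are illustrative):
#
#   python update_custom_js.py --version 4.30.0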
| 320
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Optional[Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : str = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Any = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Dict = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Optional[Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Tuple = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Optional[Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Any = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
requires_backends(cls , ['''flax'''] )
| 320
| 1
|
"""simple docstring"""
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)
    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
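# For illustration: comments and blank lines are stripped before hashing, so
# these two inputs produce the same digest.
assert _hash_python_lines(["x = 1", "# a comment", ""]) == _hash_python_lines(["x = 1"])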
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
| 352
|
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ):
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t):
            return math.exp(t * -12.0)
    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
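# For illustration (cosine schedule): betas increase toward the end of the
# diffusion process and are clipped at `max_beta`:
#
#   betas = betas_for_alpha_bar(1000)
#   assert betas[0] < betas[-1] <= 0.999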
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    """simple docstring"""
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
    @register_to_config
    def __init__(self, num_train_timesteps=1000, beta_start=0.00085, beta_end=0.012, beta_schedule="linear", trained_betas=None, prediction_type="epsilon", timestep_spacing="linspace", steps_offset=0, ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample, timestep, ):
        step_index = self.index_for_timestep(timestep)
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None, ):
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.")
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]])
        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)
        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()
        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])
        self.sample = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()
        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]
        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1
        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t
    @property
    def state_in_first_order(self):
        return self.sample is None
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = True ,):
_lowercase : Optional[int] = self.index_for_timestep(UpperCAmelCase_ )
# advance index counter by 1
_lowercase : str = timestep.cpu().item() if torch.is_tensor(UpperCAmelCase_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_lowercase : Any = self.sigmas[step_index]
_lowercase : Any = self.sigmas_interpol[step_index + 1]
_lowercase : Tuple = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
_lowercase : Union[str, Any] = self.sigmas[step_index - 1]
_lowercase : int = self.sigmas_interpol[step_index]
_lowercase : Tuple = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_lowercase : Any = 0
_lowercase : int = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_lowercase : Union[str, Any] = sigma_hat if self.state_in_first_order else sigma_interpol
_lowercase : Optional[Any] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_lowercase : str = sigma_hat if self.state_in_first_order else sigma_interpol
_lowercase : List[str] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("""prediction_type not implemented yet: sample""" )
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_lowercase : List[str] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_lowercase : Any = sigma_interpol - sigma_hat
# store for 2nd order step
_lowercase : List[Any] = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
_lowercase : Optional[Any] = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
_lowercase : Optional[Any] = sigma_next - sigma_hat
_lowercase : Any = self.sample
_lowercase : Optional[int] = None
_lowercase : str = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_lowercase : int = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(UpperCAmelCase_ ):
# mps does not support float64
_lowercase : str = self.timesteps.to(original_samples.device ,dtype=torch.float32 )
_lowercase : Any = timesteps.to(original_samples.device ,dtype=torch.float32 )
else:
_lowercase : List[Any] = self.timesteps.to(original_samples.device )
_lowercase : Union[str, Any] = timesteps.to(original_samples.device )
_lowercase : List[Any] = [self.index_for_timestep(UpperCAmelCase_ ,UpperCAmelCase_ ) for t in timesteps]
_lowercase : Optional[Any] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_lowercase : List[Any] = sigma.unsqueeze(-1 )
_lowercase : int = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
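# --- Illustrative sketch (not part of the original file) ---
# The interpolated sigmas built in the set_timesteps-style method above are
# geometric means of neighbouring sigmas: a lerp of the logs with weight 0.5,
# then exp. A self-contained check with made-up sigma values:
import torch

_demo_sigmas = torch.tensor([14.6, 8.0, 3.5, 1.2, 1e-4])
_demo_interpol = _demo_sigmas.log().lerp(_demo_sigmas.roll(1).log(), 0.5).exp()
# For i >= 1, interpol[i] == sqrt(sigma[i] * sigma[i - 1]); index 0 wraps around.
assert torch.allclose(_demo_interpol[1:], (_demo_sigmas[1:] * _demo_sigmas[:-1]).sqrt())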
| 336
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_UpperCAmelCase : Optional[Any] = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : str = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[Any] = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Dict = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
_UpperCAmelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
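# Note (illustrative): with the _LazyModule registration above, submodules are
# imported lazily. A hypothetical session:
#   import transformers
#   transformers.models.blip.BlipProcessor  # first attribute access imports
#                                           # processing_blip on demand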
| 222
|
def A ( lowercase ) -> str:
'''simple docstring'''
return " ".join(
''.join(word[::-1] ) if len(lowercase ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
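# Quick self-contained check (illustrative): words strictly longer than four
# characters are reversed, shorter ones are kept as-is.
assert " ".join(
    "".join(w[::-1]) if len(w) > 4 else w for w in "Hey wollef sroirraw".split()
) == "Hey fellow warriors"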
| 222
| 1
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Any = ['image_processor', 'tokenizer']
A : List[Any] = 'LayoutLMv3ImageProcessor'
A : Union[str, Any] = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ ):
lowercase : List[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , SCREAMING_SNAKE_CASE__ , )
lowercase : int = kwargs.pop('''feature_extractor''' )
lowercase : Union[str, Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __call__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 0 , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
# first, apply the image processor
lowercase : List[Any] = self.image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
lowercase : Optional[int] = features['''words''']
lowercase : int = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , return_overflowing_tokens=SCREAMING_SNAKE_CASE__ , return_special_tokens_mask=SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , return_length=SCREAMING_SNAKE_CASE__ , verbose=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
# add pixel values
lowercase : Union[str, Any] = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
lowercase : str = self.get_overflowing_images(SCREAMING_SNAKE_CASE__ , encoded_inputs['''overflow_to_sample_mapping'''] )
lowercase : Tuple = images
return encoded_inputs
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
lowercase : Optional[int] = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(SCREAMING_SNAKE_CASE__ ) != len(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f""" {len(SCREAMING_SNAKE_CASE__ )} and {len(SCREAMING_SNAKE_CASE__ )}""" )
return images_with_overflow
def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@property
def __lowerCamelCase ( self ):
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def __lowerCamelCase ( self ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , SCREAMING_SNAKE_CASE__ , )
return self.image_processor_class
@property
def __lowerCamelCase ( self ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , SCREAMING_SNAKE_CASE__ , )
return self.image_processor
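# --- Usage sketch (assumptions: the public class name LayoutLMv3Processor and
# the checkpoint "microsoft/layoutlmv3-base"; running this downloads the model
# and, since apply_ocr defaults to True, requires Tesseract to be installed) ---
from PIL import Image
from transformers import LayoutLMv3Processor

processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
image = Image.new("RGB", (224, 224), "white")
encoding = processor(image, return_tensors="pt")
print(sorted(encoding.keys()))  # expected: attention_mask, bbox, input_ids, pixel_values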
| 360
|
import os
import pytest
from attr import dataclass
__a = '''us-east-1''' # defaults region
@dataclass
class __SCREAMING_SNAKE_CASE :
A : str
A : str = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
A : Union[str, Any] = {
'task_name': 'mnli',
'per_device_train_batch_size': 16,
'per_device_eval_batch_size': 16,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 500,
'save_steps': 5500,
}
A : str = {**hyperparameters, 'max_steps': 1000}
@property
def __lowerCamelCase ( self ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def __lowerCamelCase ( self ):
return f"""{self.framework}-transfromers-test"""
@property
def __lowerCamelCase ( self ):
return f"""./tests/sagemaker/scripts/{self.framework}"""
@property
def __lowerCamelCase ( self ):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='''class''' )
def __lowercase ( _UpperCamelCase ) ->str:
"""simple docstring"""
lowercase : Union[str, Any] = SageMakerTestEnvironment(framework=request.cls.framework )
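# Quick regex sanity check (the log line is made up for illustration):
import re

_line = "eval_accuracy = 0.8423"
_match = re.search(r"eval_accuracy.*=\D*(.*?)$", _line)
assert _match is not None and _match.group(1) == "0.8423"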
| 173
| 0
|
"""simple docstring"""
import math
def _snake_case ( ):
lowerCAmelCase : Union[str, Any] = input('''Enter message: ''' )
lowerCAmelCase : Optional[int] = int(input(f'''Enter key [2-{len(_snake_case ) - 1}]: ''' ) )
lowerCAmelCase : str = input('''Encryption/Decryption [e/d]: ''' )
if mode.lower().startswith('''e''' ):
lowerCAmelCase : Any = encrypt_message(_snake_case , _snake_case )
elif mode.lower().startswith('''d''' ):
lowerCAmelCase : Union[str, Any] = decrypt_message(_snake_case , _snake_case )
# Append pipe symbol (vertical bar) to identify spaces at the end.
print(f'''Output:\n{text + "|"}''' )
def _snake_case ( _snake_case : int , _snake_case : str ):
lowerCAmelCase : Optional[Any] = [''''''] * key
for col in range(_snake_case ):
lowerCAmelCase : Optional[Any] = col
while pointer < len(_snake_case ):
cipher_text[col] += message[pointer]
pointer += key
return "".join(_snake_case )
def _snake_case ( _snake_case : int , _snake_case : str ):
lowerCAmelCase : Union[str, Any] = math.ceil(len(_snake_case ) / key )
lowerCAmelCase : str = key
lowerCAmelCase : Any = (num_cols * num_rows) - len(_snake_case )
lowerCAmelCase : Dict = [''''''] * num_cols
lowerCAmelCase : int = 0
lowerCAmelCase : int = 0
for symbol in message:
plain_text[col] += symbol
col += 1
if (
(col == num_cols)
or (col == num_cols - 1)
and (row >= num_rows - num_shaded_boxes)
):
lowerCAmelCase : int = 0
row += 1
return "".join(_snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
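# Worked example (assuming the original helper names encrypt_message(key, message)
# and decrypt_message(key, message)): with key = 8,
#   encrypt_message(8, "Common sense is not so common.")
#     -> 'Cenoonommstmme oo snnio. s s c'
# and decrypt_message(8, 'Cenoonommstmme oo snnio. s s c') recovers the original
# text, so the two functions round-trip.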
| 60
|
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
_UpperCamelCase : Optional[int] = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
_UpperCamelCase : List[str] = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_UpperCamelCase : Tuple = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_UpperCamelCase : str = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
_UpperCamelCase : Optional[int] = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
_UpperCamelCase : List[str] = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def __UpperCAmelCase ( A : Optional[int] ) -> int:
UpperCAmelCase_ : Dict = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , A )
return [m.group(0 ) for m in matches]
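# Illustration of the camel-case splitter above (self-contained):
import re

_parts = [
    m.group(0)
    for m in re.finditer(
        r".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", "TFBertForMaskedLM"
    )
]
assert _parts == ["TF", "Bert", "For", "Masked", "LM"]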
def __UpperCAmelCase ( ) -> str:
UpperCAmelCase_ : Optional[int] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
UpperCAmelCase_ : Optional[Any] = {
config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
UpperCAmelCase_ : Dict = collections.defaultdict(A )
UpperCAmelCase_ : str = collections.defaultdict(A )
UpperCAmelCase_ : int = collections.defaultdict(A )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(A ):
UpperCAmelCase_ : int = None
if _re_tf_models.match(A ) is not None:
UpperCAmelCase_ : Optional[Any] = tf_models
UpperCAmelCase_ : Optional[int] = _re_tf_models.match(A ).groups()[0]
elif _re_flax_models.match(A ) is not None:
UpperCAmelCase_ : int = flax_models
UpperCAmelCase_ : Any = _re_flax_models.match(A ).groups()[0]
elif _re_pt_models.match(A ) is not None:
UpperCAmelCase_ : Union[str, Any] = pt_models
UpperCAmelCase_ : List[Any] = _re_pt_models.match(A ).groups()[0]
if lookup_dict is not None:
while len(A ) > 0:
if attr_name in model_prefix_to_model_type:
UpperCAmelCase_ : Optional[int] = True
break
# Try again after removing the last word in the name
UpperCAmelCase_ : List[Any] = ''''''.join(camel_case_split(A )[:-1] )
UpperCAmelCase_ : Tuple = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
UpperCAmelCase_ : List[Any] = list(A )
all_models.sort()
UpperCAmelCase_ : Dict = {'''model_type''': all_models}
UpperCAmelCase_ : Tuple = [pt_models[t] for t in all_models]
UpperCAmelCase_ : Dict = [tf_models[t] for t in all_models]
UpperCAmelCase_ : Optional[int] = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to pick the right processing class for each model
UpperCAmelCase_ : int = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
UpperCAmelCase_ : Any = '''AutoProcessor'''
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
UpperCAmelCase_ : Union[str, Any] = '''AutoTokenizer'''
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
UpperCAmelCase_ : int = '''AutoFeatureExtractor'''
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
UpperCAmelCase_ : Dict = '''AutoTokenizer'''
UpperCAmelCase_ : str = [processors[t] for t in all_models]
return pd.DataFrame(A )
def __UpperCAmelCase ( A : Optional[int] ) -> str:
UpperCAmelCase_ : int = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
UpperCAmelCase_ : Tuple = [model_mapping, F"TF_{model_mapping}", F"FLAX_{model_mapping}"]
UpperCAmelCase_ : Tuple = [auto_class, F"TF_{auto_class}", F"Flax_{auto_class}"]
# Loop through all three frameworks
for module, cls, mapping in zip(A , A , A ):
# The type of pipeline may not exist in this framework
if not hasattr(A , A ):
continue
# First extract all model_names
UpperCAmelCase_ : List[str] = []
for name in getattr(A , A ).values():
if isinstance(A , A ):
model_names.append(A )
else:
model_names.extend(list(A ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def __UpperCAmelCase ( A : int , A : Any ) -> Tuple:
UpperCAmelCase_ : Tuple = get_frameworks_table()
UpperCAmelCase_ : Any = Dataset.from_pandas(A )
UpperCAmelCase_ : str = hf_hub_download(
'''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=A )
UpperCAmelCase_ : Union[str, Any] = Dataset.from_json(A )
UpperCAmelCase_ : Optional[int] = {
tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
for i in range(len(A ) )
}
UpperCAmelCase_ : str = update_pipeline_and_auto_class_table(A )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
UpperCAmelCase_ : Union[str, Any] = sorted(table.keys() )
UpperCAmelCase_ : Optional[Any] = pd.DataFrame(
{
'''model_class''': model_classes,
'''pipeline_tag''': [table[m][0] for m in model_classes],
'''auto_class''': [table[m][1] for m in model_classes],
} )
UpperCAmelCase_ : Dict = Dataset.from_pandas(A )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(A , '''frameworks.json''' ) )
tags_dataset.to_json(os.path.join(A , '''pipeline_tags.json''' ) )
if commit_sha is not None:
UpperCAmelCase_ : List[str] = (
F"Update with commit {commit_sha}\n\nSee: "
F"https://github.com/huggingface/transformers/commit/{commit_sha}"
)
else:
UpperCAmelCase_ : int = '''Update'''
upload_folder(
repo_id='''huggingface/transformers-metadata''' , folder_path=A , repo_type='''dataset''' , token=A , commit_message=A , )
def __UpperCAmelCase ( ) -> int:
UpperCAmelCase_ : str = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
UpperCAmelCase_ : List[str] = transformers_module.pipelines.SUPPORTED_TASKS
UpperCAmelCase_ : List[str] = []
for key in pipeline_tasks:
if key not in in_table:
UpperCAmelCase_ : Optional[Any] = pipeline_tasks[key]['''pt''']
if isinstance(A , (list, tuple) ):
UpperCAmelCase_ : Dict = model[0]
UpperCAmelCase_ : Any = model.__name__
if model not in in_table.values():
missing.append(A )
if len(A ) > 0:
UpperCAmelCase_ : List[Any] = ''', '''.join(A )
raise ValueError(
'''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
F"`utils/update_metadata.py`: {msg}. Please add them!" )
if __name__ == "__main__":
_UpperCamelCase : int = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
_UpperCamelCase : Tuple = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 304
| 0
|
'''simple docstring'''
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE (a__ , unittest.TestCase ):
lowerCAmelCase = PhobertTokenizer
lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__A : str = ['T@@', 'i', 'I', 'R@@', 'r', 'e@@']
__A : Tuple = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase))))
__A : List[Any] = ['#version: 0.2', 'l à</w>']
__A : Dict = {'unk_token': '<unk>'}
__A : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
__A : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
for token in vocab_tokens:
fp.write(F'{token} {vocab_tokens[token]}\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(_UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self , **_UpperCAmelCase):
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return PhobertTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : List[str] = 'Tôi là VinAI Research'
__A : Union[str, Any] = 'T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
__A : Optional[Any] = 'Tôi là VinAI Research'
__A : Union[str, Any] = 'T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'.split()
__A : List[str] = tokenizer.tokenize(_UpperCAmelCase)
print(_UpperCAmelCase)
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
__A : Optional[int] = tokens + [tokenizer.unk_token]
__A : Tuple = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase) , _UpperCAmelCase)
| 190
|
'''simple docstring'''
def _lowerCAmelCase ( __snake_case : str ) -> str:
__A : Optional[Any] = [0] * len(__snake_case )
__A : Dict = []
__A : Optional[int] = [1] * len(__snake_case )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__snake_case ) ):
if indegree[i] == 0:
queue.append(__snake_case )
while queue:
__A : int = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
__A : str = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(__snake_case )
print(max(__snake_case ) )
# Adjacency list of Graph
lowercase__ : Dict = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
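# For the adjacency list above the script prints 5: the longest path is
# 0 -> 3 -> 5 -> 6 -> 7, and distances count vertices here (long_dist starts
# at 1 for every node), i.e. 5 vertices / 4 edges.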
| 190
| 1
|
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
lowerCamelCase_ : Union[str, Any] = get_logger(__name__)
lowerCamelCase_ : Any = Path(__file__).parent / """model_card_template.md"""
lowerCamelCase_ : Optional[int] = uuid4().hex
lowerCamelCase_ : str = os.getenv("""HF_HUB_OFFLINE""", """""").upper() in ENV_VARS_TRUE_VALUES
lowerCamelCase_ : Tuple = os.getenv("""DISABLE_TELEMETRY""", """""").upper() in ENV_VARS_TRUE_VALUES
lowerCamelCase_ : Dict = HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/"""
def _A ( lowercase = None ):
"""simple docstring"""
a =f'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'''
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f'''; torch/{_torch_version}'''
if is_flax_available():
ua += f'''; jax/{_jax_version}'''
ua += f'''; flax/{_flax_version}'''
if is_onnx_available():
ua += f'''; onnxruntime/{_onnxruntime_version}'''
# CI will set this value to True
if os.environ.get('''DIFFUSERS_IS_CI''' , '''''' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
ua += "; " + "; ".join(f'''{k}/{v}''' for k, v in user_agent.items() )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
ua += "; " + user_agent
return ua
def _A ( lowercase , lowercase = None , lowercase = None ):
"""simple docstring"""
if token is None:
a =HfFolder.get_token()
if organization is None:
a =whoami(_SCREAMING_SNAKE_CASE )['''name''']
return f'''{username}/{model_id}'''
else:
return f'''{organization}/{model_id}'''
def _A ( lowercase , lowercase ):
"""simple docstring"""
if not is_jinja_available():
raise ValueError(
'''Modelcard rendering is based on Jinja templates.'''
''' Please make sure to have `jinja` installed before using `create_model_card`.'''
''' To install it, please run `pip install Jinja2`.''' )
if hasattr(_SCREAMING_SNAKE_CASE , '''local_rank''' ) and args.local_rank not in [-1, 0]:
return
a =args.hub_token if hasattr(_SCREAMING_SNAKE_CASE , '''hub_token''' ) else None
a =get_full_repo_name(_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE )
a =ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='''en''' , license='''apache-2.0''' , library_name='''diffusers''' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=_SCREAMING_SNAKE_CASE , model_name=_SCREAMING_SNAKE_CASE , repo_name=_SCREAMING_SNAKE_CASE , dataset_name=args.dataset_name if hasattr(_SCREAMING_SNAKE_CASE , '''dataset_name''' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(_SCREAMING_SNAKE_CASE , '''gradient_accumulation_steps''' ) else None
) , adam_beta1=args.adam_beta1 if hasattr(_SCREAMING_SNAKE_CASE , '''adam_beta1''' ) else None , adam_beta2=args.adam_beta2 if hasattr(_SCREAMING_SNAKE_CASE , '''adam_beta2''' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(_SCREAMING_SNAKE_CASE , '''adam_weight_decay''' ) else None , adam_epsilon=args.adam_epsilon if hasattr(_SCREAMING_SNAKE_CASE , '''adam_epsilon''' ) else None , lr_scheduler=args.lr_scheduler if hasattr(_SCREAMING_SNAKE_CASE , '''lr_scheduler''' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(_SCREAMING_SNAKE_CASE , '''lr_warmup_steps''' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(_SCREAMING_SNAKE_CASE , '''ema_inv_gamma''' ) else None , ema_power=args.ema_power if hasattr(_SCREAMING_SNAKE_CASE , '''ema_power''' ) else None , ema_max_decay=args.ema_max_decay if hasattr(_SCREAMING_SNAKE_CASE , '''ema_max_decay''' ) else None , mixed_precision=args.mixed_precision , )
a =os.path.join(args.output_dir , '''README.md''' )
model_card.save(_SCREAMING_SNAKE_CASE )
def _A ( lowercase , lowercase = None ):
"""simple docstring"""
if resolved_file is None or commit_hash is not None:
return commit_hash
a =str(Path(_SCREAMING_SNAKE_CASE ).as_posix() )
a =re.search(R'''snapshots/([^/]+)/''' , _SCREAMING_SNAKE_CASE )
if search is None:
return None
a =search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(_SCREAMING_SNAKE_CASE ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
lowerCamelCase_ : int = os.path.expanduser(
os.getenv("""HF_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """huggingface"""))
)
lowerCamelCase_ : Any = os.path.join(hf_cache_home, """diffusers""")
def _A ( lowercase = None , lowercase = None ):
"""simple docstring"""
if new_cache_dir is None:
a =DIFFUSERS_CACHE
if old_cache_dir is None:
a =old_diffusers_cache
a =Path(_SCREAMING_SNAKE_CASE ).expanduser()
a =Path(_SCREAMING_SNAKE_CASE ).expanduser()
for old_blob_path in old_cache_dir.glob('''**/blobs/*''' ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
a =new_cache_dir / old_blob_path.relative_to(_SCREAMING_SNAKE_CASE )
new_blob_path.parent.mkdir(parents=_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
os.replace(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
try:
os.symlink(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
except OSError:
logger.warning(
'''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
lowerCamelCase_ : List[Any] = os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""")
if not os.path.isfile(cache_version_file):
lowerCamelCase_ : Any = 0
else:
with open(cache_version_file) as f:
try:
lowerCamelCase_ : List[Any] = int(f.read())
except ValueError:
lowerCamelCase_ : List[str] = 0
if cache_version < 1:
lowerCamelCase_ : Any = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"""The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """
"""existing cached models. This is a one-time operation, you can interrupt it or run it """
"""later by calling `diffusers.utils.hub_utils.move_cache()`."""
)
try:
move_cache()
except Exception as e:
lowerCamelCase_ : str = """\n""".join(traceback.format_tb(e.__traceback__))
logger.error(
F'There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '
"""file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """
"""message and we will do our best to help."""
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, """w""") as f:
f.write("""1""")
except Exception:
logger.warning(
F'There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '
"""the directory exists and can be written to."""
)
def _A ( lowercase , lowercase = None ):
"""simple docstring"""
if variant is not None:
a =weights_name.split('''.''' )
a =splits[:-1] + [variant] + splits[-1:]
a ='''.'''.join(_SCREAMING_SNAKE_CASE )
return weights_name
def _A ( lowercase , *,
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=None , ):
"""simple docstring"""
a =str(_SCREAMING_SNAKE_CASE )
if os.path.isfile(_SCREAMING_SNAKE_CASE ):
return pretrained_model_name_or_path
elif os.path.isdir(_SCREAMING_SNAKE_CASE ):
if os.path.isfile(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ):
# Load from a PyTorch checkpoint
a =os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ):
a =os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return model_file
else:
raise EnvironmentError(
f'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(_SCREAMING_SNAKE_CASE ).base_version ) >= version.parse('''0.20.0''' )
):
try:
a =hf_hub_download(
_SCREAMING_SNAKE_CASE , filename=_add_variant(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , user_agent=_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE , revision=revision or commit_hash , )
warnings.warn(
f'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' , _SCREAMING_SNAKE_CASE , )
return model_file
except: # noqa: E722
warnings.warn(
f'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}\' so that the correct variant file can be added.''' , _SCREAMING_SNAKE_CASE , )
try:
# 2. Load model file as usual
a =hf_hub_download(
_SCREAMING_SNAKE_CASE , filename=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , user_agent=_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''' )
except RevisionNotFoundError:
raise EnvironmentError(
f'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
'''this model name. Check the model page at '''
f'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' )
except EntryNotFoundError:
raise EnvironmentError(
f'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' )
except HTTPError as err:
raise EnvironmentError(
f'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' )
except ValueError:
raise EnvironmentError(
f'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
f''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
f''' directory containing a file named {weights_name} or'''
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
except EnvironmentError:
raise EnvironmentError(
f'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
f'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
f'''containing a file named {weights_name}''' )
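# Illustration of the variant naming convention implemented by the last helper
# above (diffusers' `_add_variant`; the file name is just an example):
_splits = "diffusion_pytorch_model.bin".split(".")
assert ".".join(_splits[:-1] + ["fp16"] + _splits[-1:]) == "diffusion_pytorch_model.fp16.bin"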
| 81
|
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Tuple =KandinskyVaaPriorPipeline
__lowerCamelCase : Union[str, Any] =['prompt']
__lowerCamelCase : Any =['prompt', 'negative_prompt']
__lowerCamelCase : List[str] =[
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
__lowerCamelCase : List[Any] =False
@property
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return 32
@property
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
return 32
@property
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
return self.time_input_dim
@property
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return 100
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
__a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__lowercase )
@property
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
__a = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 12,
"""embedding_dim""": self.text_embedder_hidden_size,
"""num_layers""": 1,
}
__a = PriorTransformer(**__lowercase )
# clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it doesn't
__a = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
__a = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
__a = CLIPVisionModelWithProjection(__lowercase )
return model
@property
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a = CLIPImageProcessor(
crop_size=224 , do_center_crop=__lowercase , do_normalize=__lowercase , do_resize=__lowercase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = self.dummy_prior
__a = self.dummy_image_encoder
__a = self.dummy_text_encoder
__a = self.dummy_tokenizer
__a = self.dummy_image_processor
__a = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=__lowercase , clip_sample_range=10.0 , )
__a = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""scheduler""": scheduler,
"""image_processor""": image_processor,
}
return components
def UpperCamelCase_ ( self : Optional[int] , __lowercase : List[str] , __lowercase : Any=0 ):
'''simple docstring'''
if str(__lowercase ).startswith("""mps""" ):
__a = torch.manual_seed(__lowercase )
else:
__a = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__a = {
"""prompt""": """horse""",
"""generator""": generator,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = """cpu"""
__a = self.get_dummy_components()
__a = self.pipeline_class(**__lowercase )
__a = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__a = pipe(**self.get_dummy_inputs(__lowercase ) )
__a = output.image_embeds
__a = pipe(
**self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0]
__a = image[0, -10:]
__a = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
__a = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = torch_device == """cpu"""
__a = True
__a = False
self._test_inference_batch_single_identical(
test_max_difference=__lowercase , relax_max_difference=__lowercase , test_mean_pixel_difference=__lowercase , )
@skip_mps
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
__a = torch_device == """cpu"""
__a = False
self._test_attention_slicing_forward_pass(
test_max_difference=__lowercase , test_mean_pixel_difference=__lowercase , )
| 302
| 0
|
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
snake_case_ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(a )
class __snake_case ( a ):
def __init__( self : Tuple , *_snake_case : List[Any] , **_snake_case : Optional[Any]):
"""simple docstring"""
super().__init__(*_snake_case , **_snake_case)
self.check_model_type(_snake_case)
def lowerCamelCase ( self : List[str] , _snake_case : Optional[int]=None , _snake_case : Optional[Any]=None , _snake_case : str=None , **_snake_case : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = {}, {}
if padding is not None:
UpperCAmelCase_ = padding
if truncation is not None:
UpperCAmelCase_ = truncation
if top_k is not None:
UpperCAmelCase_ = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : List[Any] , _snake_case : Union["Image.Image", str] , _snake_case : str = None , **_snake_case : str):
"""simple docstring"""
if isinstance(_snake_case , (Image.Image, str)) and isinstance(_snake_case , _snake_case):
UpperCAmelCase_ = {'''image''': image, '''question''': question}
else:
UpperCAmelCase_ = image
UpperCAmelCase_ = super().__call__(_snake_case , **_snake_case)
return results
def lowerCamelCase ( self : Union[str, Any] , _snake_case : int , _snake_case : Optional[int]=False , _snake_case : int=False):
"""simple docstring"""
UpperCAmelCase_ = load_image(inputs['''image'''])
UpperCAmelCase_ = self.tokenizer(
inputs['''question'''] , return_tensors=self.framework , padding=_snake_case , truncation=_snake_case)
UpperCAmelCase_ = self.image_processor(images=_snake_case , return_tensors=self.framework)
model_inputs.update(_snake_case)
return model_inputs
def lowerCamelCase ( self : List[Any] , _snake_case : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.model(**_snake_case)
return model_outputs
def lowerCamelCase ( self : str , _snake_case : Optional[Any] , _snake_case : List[str]=5):
"""simple docstring"""
if top_k > self.model.config.num_labels:
UpperCAmelCase_ = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase_ = model_outputs.logits.sigmoid()[0]
UpperCAmelCase_ , UpperCAmelCase_ = probs.topk(_snake_case)
else:
raise ValueError(F"""Unsupported framework: {self.framework}""")
UpperCAmelCase_ = scores.tolist()
UpperCAmelCase_ = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_snake_case , _snake_case)]
| 7
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def A (__A : BertModel , __A : str , __A : str ) -> int:
"""simple docstring"""
UpperCAmelCase_ = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
UpperCAmelCase_ = (
('''layer.''', '''layer_'''),
('''word_embeddings.weight''', '''word_embeddings'''),
('''position_embeddings.weight''', '''position_embeddings'''),
('''token_type_embeddings.weight''', '''token_type_embeddings'''),
('''.''', '''/'''),
('''LayerNorm/weight''', '''LayerNorm/gamma'''),
('''LayerNorm/bias''', '''LayerNorm/beta'''),
('''weight''', '''kernel'''),
)
if not os.path.isdir(__A ):
os.makedirs(__A )
UpperCAmelCase_ = model.state_dict()
def to_tf_var_name(__A : str ):
for patt, repl in iter(__A ):
UpperCAmelCase_ = name.replace(__A , __A )
return F"""bert/{name}"""
def create_tf_var(__A : np.ndarray , __A : str , __A : tf.Session ):
UpperCAmelCase_ = tf.dtypes.as_dtype(tensor.dtype )
UpperCAmelCase_ = tf.get_variable(dtype=__A , shape=tensor.shape , name=__A , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(__A )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
UpperCAmelCase_ = to_tf_var_name(__A )
UpperCAmelCase_ = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
UpperCAmelCase_ = torch_tensor.T
UpperCAmelCase_ = create_tf_var(tensor=__A , name=__A , session=__A )
tf.keras.backend.set_value(__A , __A )
UpperCAmelCase_ = session.run(__A )
print(F"""Successfully created {tf_name}: {np.allclose(__A , __A )}""" )
UpperCAmelCase_ = tf.train.Saver(tf.trainable_variables() )
saver.save(__A , os.path.join(__A , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) )
def A (__A : Any=None ) -> str:
"""simple docstring"""
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=__A , required=__A , help='''model name e.g. bert-base-uncased''' )
parser.add_argument(
'''--cache_dir''' , type=__A , default=__A , required=__A , help='''Directory containing pytorch model''' )
parser.add_argument('''--pytorch_model_path''' , type=__A , required=__A , help='''/path/to/<pytorch-model-name>.bin''' )
parser.add_argument('''--tf_cache_dir''' , type=__A , required=__A , help='''Directory in which to save tensorflow model''' )
UpperCAmelCase_ = parser.parse_args(__A )
UpperCAmelCase_ = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=__A , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
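# Example invocation (script and path names are placeholders):
#   python convert_bert_pytorch_checkpoint_to_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./bert/pytorch_model.bin \
#       --tf_cache_dir ./tf_checkpoints
# This writes ./tf_checkpoints/bert_base_uncased.ckpt, transposing dense/attention
# kernels as listed in tensors_to_transpose.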
| 7
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : int = '''openai-gpt'''
A : str = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self, A=40_478, A=512, A=768, A=12, A=12, A="gelu", A=0.1, A=0.1, A=0.1, A=1E-5, A=0.02, A="cls_index", A=True, A=None, A=True, A=0.1, **A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = n_positions
SCREAMING_SNAKE_CASE : List[str] = n_embd
SCREAMING_SNAKE_CASE : Optional[Any] = n_layer
SCREAMING_SNAKE_CASE : Optional[Any] = n_head
SCREAMING_SNAKE_CASE : str = afn
SCREAMING_SNAKE_CASE : List[str] = resid_pdrop
SCREAMING_SNAKE_CASE : int = embd_pdrop
SCREAMING_SNAKE_CASE : Optional[Any] = attn_pdrop
SCREAMING_SNAKE_CASE : List[str] = layer_norm_epsilon
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = summary_type
SCREAMING_SNAKE_CASE : Tuple = summary_use_proj
SCREAMING_SNAKE_CASE : Dict = summary_activation
SCREAMING_SNAKE_CASE : Tuple = summary_first_dropout
SCREAMING_SNAKE_CASE : List[str] = summary_proj_to_labels
super().__init__(**A )
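# Minimal usage sketch (the public class is OpenAIGPTConfig; the attribute_map
# above aliases the standard names onto GPT-1's n_* hyper-parameters):
#   config = OpenAIGPTConfig()
#   config.hidden_size, config.num_hidden_layers  # -> 768, 12 via n_embd / n_layer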
| 251
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCamelCase_ = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 251
| 1
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=50_265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2_048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2_048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
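# A minimal usage sketch (the hyperparameter values below are assumptions for
# illustration, not from the original file): any keyword overrides a single
# architecture hyperparameter, and attribute_map aliases resolve transparently.
#
#   config = BlenderbotSmallConfig(d_model=256, encoder_layers=4)
#   assert config.hidden_size == 256  # alias for d_model via attribute_map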
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    '''simple docstring'''
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        '''simple docstring'''
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        '''simple docstring'''
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        '''simple docstring'''
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        '''simple docstring'''
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
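# Sketch of how this ONNX config is typically consumed (the flow below follows
# the generic transformers.onnx export pattern and is an assumption, not part
# of this file):
#
#   config = BlenderbotSmallConfig()
#   onnx_config = BlenderbotSmallOnnxConfig(config, task="seq2seq-lm")
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)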
| 371
|
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """simple docstring"""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )
__A = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@staticmethod
def SCREAMING_SNAKE_CASE_ (UpperCAmelCase_ : ArgumentParser) ->str:
'''simple docstring'''
lowerCamelCase__: Dict =parser.add_parser(
"convert" , help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." , )
train_parser.add_argument("--model_type" , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="Model's type.")
train_parser.add_argument(
"--tf_checkpoint" , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="TensorFlow checkpoint path or folder.")
train_parser.add_argument(
"--pytorch_dump_output" , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="Path to the PyTorch saved model output.")
train_parser.add_argument("--config" , type=UpperCAmelCase_ , default="" , help="Configuration file path or folder.")
train_parser.add_argument(
"--finetuning_task_name" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help="Optional fine-tuning task name if the TF model was a finetuned model." , )
train_parser.set_defaults(func=UpperCAmelCase_)
def __init__(self : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : str , *UpperCAmelCase_ : Optional[int] , ) ->List[str]:
'''simple docstring'''
lowerCamelCase__: Dict =logging.get_logger("transformers-cli/converting")
self._logger.info(F"""Loading model {model_type}""")
lowerCamelCase__: Any =model_type
lowerCamelCase__: Optional[int] =tf_checkpoint
lowerCamelCase__: Any =pytorch_dump_output
lowerCamelCase__: Union[str, Any] =config
lowerCamelCase__: str =finetuning_task_name
    def run(self):
        '''simple docstring'''
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )
            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name)
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )
            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )
            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )
            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]")
| 273
| 0
|
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
a_ : int = importlib.util.find_spec("""s3fs""") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
a_ : List[compression.BaseCompressedFileFileSystem] = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """simple docstring"""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """simple docstring"""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str) -> None:
    """simple docstring"""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock() -> None:
    """simple docstring"""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
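# Quick sanity sketch (illustrative values, not from the original file):
#   extract_path_from_uri("s3://my-bucket/datasets/train") -> "my-bucket/datasets/train"
#   is_remote_filesystem(fsspec.filesystem("file")) -> False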
| 75
|
'''simple docstring'''
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299792458
# Symbols
ct, x, y, z = symbols("ct x y z")
def beta(velocity: float) -> float:
'''simple docstring'''
if velocity > c:
raise ValueError("""Speed must not exceed light speed 299,792,458 [m/s]!""" )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError("""Speed must be greater than or equal to 1!""" )
return velocity / c
def gamma(velocity: float) -> float:
    '''simple docstring'''
    return 1 / sqrt(1 - beta(velocity) ** 2)
def transformation_matrix(velocity: float) -> np.ndarray:
    '''simple docstring'''
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )
def transform(velocity: float, event: np.ndarray | None = None) -> np.ndarray:
    '''simple docstring'''
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29979245)
print('''Example of four vector: ''')
print(f'ct\' = {four_vector[0]}')
print(f'x\' = {four_vector[1]}')
print(f'y\' = {four_vector[2]}')
print(f'z\' = {four_vector[3]}')
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(f'\n{numerical_vector}')
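# Worked check (computed by hand, not from the original file): for
# velocity = 29979245 m/s, beta = velocity / c ~= 0.1, so
# gamma = 1 / sqrt(1 - 0.1**2) ~= 1.00504, which is what the boost matrix
# returned by transformation_matrix(29979245) encodes.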
| 229
| 0
|
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C
def carrier_concentration(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
"""simple docstring"""
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
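# Worked example (input values assumed for illustration): with
# conductivity = 25.0, electron_conc = 100.0 and mobility = 0 (the unknown),
# the function returns ("mobility", 25.0 / (100.0 * ELECTRON_CHARGE)),
# i.e. roughly ("mobility", 1.56e18).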
| 215
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __A ( _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = IFInpaintingSuperResolutionPipeline
__lowerCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
__lowerCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
__lowerCAmelCase = PipelineTesterMixin.required_optional_params - {"latents"}
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
return self._get_superresolution_dummy_components()
def SCREAMING_SNAKE_CASE ( self , __A , __A=0 ) -> Optional[int]:
if str(__A ).startswith('''mps''' ):
a =torch.manual_seed(__A )
else:
a =torch.Generator(device=__A ).manual_seed(__A )
a =floats_tensor((1, 3, 16, 16) , rng=random.Random(__A ) ).to(__A )
a =floats_tensor((1, 3, 32, 32) , rng=random.Random(__A ) ).to(__A )
a =floats_tensor((1, 3, 32, 32) , rng=random.Random(__A ) ).to(__A )
a ={
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def SCREAMING_SNAKE_CASE ( self ) -> int:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
self._test_save_load_local()
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 215
| 1
|
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num))[0]
def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
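# Sanity check (standard identity, not from the original file): for positive
# integers, gamma(n) == factorial(n - 1), e.g. gamma(5) ~= 24.0.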
| 92
|
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    '''simple docstring'''
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b
def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    '''simple docstring'''
    return (gray > 127) & (gray <= 255)
def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    '''simple docstring'''
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
if __name__ == "__main__":
# read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
pil_img.save("result_dilation.png")
| 260
| 0
|
"""simple docstring"""
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)
    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape
    strictly_diagonally_dominant(table)
    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val
    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
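# Worked example (inputs assumed for illustration): the strictly diagonally
# dominant system 4x + y = 2, x + 3y = -6 has exact solution x = 12/11,
# y = -26/11, and a few Jacobi sweeps approach it:
#   coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
#   constant = np.array([[2.0], [-6.0]])
#   jacobi_iteration_method(coefficient, constant, init_val=[0.5, -0.5], iterations=3)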
| 12
|
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"""--original_config_file""",
default=None,
type=str,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--scheduler_type""",
default="""pndm""",
type=str,
help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""",
)
parser.add_argument(
"""--pipeline_type""",
default=None,
type=str,
help=(
"""The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"""
""". If `None` pipeline will be automatically inferred."""
),
)
parser.add_argument(
"""--image_size""",
default=None,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--prediction_type""",
default=None,
type=str,
help=(
"""The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"""
""" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
parser.add_argument(
"""--stable_unclip""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""",
)
parser.add_argument(
"""--stable_unclip_prior""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
)
parser.add_argument(
"""--clip_stats_path""",
type=str,
help="""Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
required=False,
)
parser.add_argument(
"""--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
)
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--vae_path""",
type=str,
default=None,
required=False,
help="""Set to a path, hub id to an already converted vae to not convert it again.""",
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
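# Example invocation (file names are placeholders, not from the original file):
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#       --dump_path ./stable-diffusion-v1-5 --half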
| 12
| 1
|
"""simple docstring"""
ENERGY_CONVERSION = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.6_0_2_1_7_6_6_3_4e-1_9,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.355_818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
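# Quick sanity checks (standard conversions, not from the original file):
#   energy_conversion("joule", "kilojoule", 1_000) -> 1.0
#   energy_conversion("kilowatthour", "joule", 1) -> 3_600_000.0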
| 148
|
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
    # NOTE: parameter names below are reconstructed from the attribute
    # assignments in the body; the exact positional mapping of embed_dim/dim is
    # a best-effort assumption.
    def __init__(
        self,
        parent,
        batch_size: int = 13,
        image_size: int = 64,
        patch_size: int = 2,
        embed_dim: int = 3,
        num_channels: int = 3,
        is_training: bool = True,
        use_labels: bool = True,
        hidden_size: int = 128,
        hidden_sizes=[16, 32, 64, 128],
        num_hidden_layers: int = 7,
        num_attention_heads: int = 4,
        intermediate_size: int = 37,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        type_sequence_label_size: int = 10,
        initializer_range: float = 0.02,
        encoder_stride: int = 2,
        num_attention_outputs: int = 1,
        dim: int = 128,
        depths: List[int] = [2, 2, 2, 2],
        resolution: int = 2,
        mlp_expansion_ratio: int = 2,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        """simple docstring"""
        return EfficientFormerConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, resolution=self.resolution, depths=self.depths, hidden_sizes=self.hidden_sizes, dim=self.dim, mlp_expansion_ratio=self.mlp_expansion_ratio, )
    def create_and_check_model(self, config, pixel_values, labels):
        """simple docstring"""
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37
        )
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()
    @unittest.skip(reason="EfficientFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        """simple docstring"""
        pass
    @unittest.skip(reason="EfficientFormer does not support input and output embeddings")
    def test_model_common_attributes(self):
        """simple docstring"""
        pass
    def test_forward_signature(self):
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)
            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]), [seq_length, self.model_tester.hidden_size]
            )
            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states
                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size]
                )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet")
    def test_for_masked_image_modeling(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester, "seq_length", None)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        chunk_length = getattr(self.model_tester, "chunk_length", None)
        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)
            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], )
    def test_compile_tf_model(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)
            self.assertTrue(outputs_dict is not None)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        """simple docstring"""
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        """simple docstring"""
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs, training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.05_55, 0.48_25, -0.08_52])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_image_classification_head_with_teacher(self):
        """simple docstring"""
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs, training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.13_12, 0.43_53, -1.04_99])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 148
| 1
|
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='''%(message)s''')
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """simple docstring"""
    return input_array.reshape((input_array.size, 1))
def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """simple docstring"""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """simple docstring"""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """simple docstring"""
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("""Principal Component Analysis computed""")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="""%(message)s""", force=True)
        logging.error("""Dataset empty""")
        raise AssertionError
def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """simple docstring"""
    # Check if the dimension desired is less than the number of classes
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("""Linear Discriminant Analysis computed""")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="""%(message)s""", force=True)
        logging.error("""Dataset empty""")
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    """simple docstring"""
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                """Did not raise AssertionError for dimensions > classes""")
        assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    """simple docstring"""
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])
    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
        assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
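# Minimal usage sketch (data assumed for illustration): projecting three
# correlated features onto their first principal component yields a (1, 4)
# array:
#   features = np.array([[1.0, 2.0, 3.0, 4.0], [2.0, 4.0, 6.0, 8.0], [1.0, 1.0, 1.0, 1.0]])
#   principal_component_analysis(features, dimensions=1)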
| 62
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1_333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_init_without_params(self):
        pass
def _A ( self : Dict ):
# Initialize image_processing
UpperCamelCase :List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase :List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
UpperCamelCase :Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCamelCase , UpperCamelCase :Optional[int] = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase , UpperCamelCase :str = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
UpperCamelCase :int = image_processing(__lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _A ( self : Tuple ):
# Initialize image_processing
UpperCamelCase :Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase :Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
UpperCamelCase :Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCamelCase , UpperCamelCase :Any = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase :Dict = image_processing(__lowerCamelCase , return_tensors="""pt""" ).pixel_values
UpperCamelCase , UpperCamelCase :Optional[Any] = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _A ( self : Any ):
# Initialize image_processing
UpperCamelCase :Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase :List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
UpperCamelCase :Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCamelCase , UpperCamelCase :List[str] = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase :Union[str, Any] = image_processing(__lowerCamelCase , return_tensors="""pt""" ).pixel_values
UpperCamelCase , UpperCamelCase :List[str] = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _A ( self : Optional[Any] ):
# prepare image and target
UpperCamelCase :int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
UpperCamelCase :str = json.loads(f.read() )
UpperCamelCase :List[Any] = {"""image_id""": 39_769, """annotations""": target}
# encode them
UpperCamelCase :Optional[int] = DeformableDetrImageProcessor()
UpperCamelCase :Dict = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors="""pt""" )
# verify pixel values
UpperCamelCase :Union[str, Any] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["""pixel_values"""].shape , __lowerCamelCase )
UpperCamelCase :Optional[Any] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __lowerCamelCase , atol=1E-4 ) )
# verify area
UpperCamelCase :str = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __lowerCamelCase ) )
# verify boxes
UpperCamelCase :List[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __lowerCamelCase )
UpperCamelCase :List[str] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __lowerCamelCase , atol=1E-3 ) )
# verify image_id
UpperCamelCase :Tuple = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __lowerCamelCase ) )
# verify is_crowd
UpperCamelCase :List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __lowerCamelCase ) )
# verify class_labels
UpperCamelCase :Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __lowerCamelCase ) )
# verify orig_size
UpperCamelCase :Dict = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __lowerCamelCase ) )
# verify size
UpperCamelCase :int = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __lowerCamelCase ) )
@slow
def _A ( self : str ):
# prepare image, target and masks_path
UpperCamelCase :Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
UpperCamelCase :Any = json.loads(f.read() )
UpperCamelCase :int = {"""file_name""": """000000039769.png""", """image_id""": 39_769, """segments_info""": target}
UpperCamelCase :Any = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
UpperCamelCase :Tuple = DeformableDetrImageProcessor(format="""coco_panoptic""" )
UpperCamelCase :Dict = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors="""pt""" )
# verify pixel values
UpperCamelCase :Optional[int] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["""pixel_values"""].shape , __lowerCamelCase )
UpperCamelCase :Optional[int] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __lowerCamelCase , atol=1E-4 ) )
# verify area
UpperCamelCase :List[str] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __lowerCamelCase ) )
# verify boxes
UpperCamelCase :List[str] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __lowerCamelCase )
UpperCamelCase :List[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __lowerCamelCase , atol=1E-3 ) )
# verify image_id
UpperCamelCase :str = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __lowerCamelCase ) )
# verify is_crowd
UpperCamelCase :Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __lowerCamelCase ) )
# verify class_labels
UpperCamelCase :List[Any] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __lowerCamelCase ) )
# verify masks
UpperCamelCase :Union[str, Any] = 822_873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __lowerCamelCase )
# verify orig_size
UpperCamelCase :Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __lowerCamelCase ) )
# verify size
UpperCamelCase :str = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __lowerCamelCase ) )
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
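# Conversion strategy: load both the old and the new model, then walk every key the
# new model reports as missing and copy the matching tensor from the old checkpoint,
# using the name mapping below and splitting fused in_proj qkv weights where needed.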
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """
    Copy/paste/tweak prophetnet's weights to our prophetnet structure.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
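# Layer hierarchy below: ResNetConvLayer is the basic conv+batchnorm+activation block,
# stacked into ResNetBasicLayer / ResNetBottleNeckLayer residual blocks, which are
# stacked into ResNetStage, which ResNetEncoder chains together after the
# ResNetEmbeddings stem.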
class ResNetConvLayer(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, activation="relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetEmbeddings(nn.Module):
    """ResNet embeddings (stem), composed of a single aggressive convolution plus max pooling."""

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding


class ResNetShortCut(nn.Module):
    """ResNet shortcut, used to project the residual features to the correct size (and to downsample if needed)."""

    def __init__(self, in_channels, out_channels, stride=2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class ResNetBasicLayer(nn.Module):
    """A classic ResNet residual layer, composed of two 3x3 convolutions."""

    def __init__(self, in_channels, out_channels, stride=1, activation="relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetBottleNeckLayer(nn.Module):
    """A ResNet bottleneck layer: a 1x1 reduction (by `reduction`), a 3x3 convolution, and a 1x1 expansion."""

    def __init__(self, in_channels, out_channels, stride=1, activation="relu", reduction=4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetStage(nn.Module):
    """A ResNet stage composed of stacked layers."""

    def __init__(self, config: ResNetConfig, in_channels, out_channels, stride=2, depth=2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )


class ResNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)


@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
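# The solution below fits a polynomial through the first k terms of the sequence
# (via Gaussian elimination in solve()) and sums each fit's first incorrect term.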
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """
    Solve the linear system Ax = b (A = "matrix", b = "vector") for x, using
    Gaussian elimination with partial pivoting followed by back substitution.
    """
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # copy the matrix and the vector into one augmented matrix
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting: pick the row with the largest absolute value in this column
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_points: list[int]) -> Callable[[int], int]:
    """
    Given the y-values of a sequence at x = 1, 2, 3, ..., return the polynomial
    of minimal degree passing through all of those points, as a callable.
    """
    size: int = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating function u(n) = 1 - n + n^2 - n^3 + ... + n^10."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """
    Interpolate polynomials through the first 1, 2, ..., `order` terms of the
    sequence generated by `func` and sum each polynomial's first incorrect term.
    """
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int

    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring"""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
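# Number of labels used by the classification head for each GLUE task.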
GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
def solution() -> str:
    """Return the last ten digits of the series 1^1 + 2^2 + 3^3 + ... + 1000^1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
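# Pipeline flow: preprocess() tokenizes the masked text, _forward() runs the model,
# and postprocess() ranks the top-k candidate tokens for the masked position.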
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids
        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
"""Processor class for Donut."""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
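# DonutProcessor wraps an image processor (for pixel values) and a tokenizer (for
# decoder labels), and can parse generated token sequences back into JSON via token2json().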
class DonutProcessor(ProcessorMixin):
    r"""
    Constructs a Donut processor which wraps an image processor and a tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility: inside `as_target_processor`, forward
        # everything to the current processor (the tokenizer).
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """
        Convert a (generated) token sequence into an ordered JSON format.
        """
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
"""
Count the number of islands in a boolean matrix, treating all 8 surrounding
cells (including diagonals) as connected.
"""


class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
"""
Automatic differentiation with dual numbers: evaluating f at a dual number
carries the derivatives of f along with the real value.
"""
from math import factorial
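# The Dual class tracks the real part plus a list of dual ("epsilon") coefficients;
# multiplying duals convolves these lists, which carries higher-order derivative terms.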
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
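# Conversion strategy: rename checkpoint keys with the mapping above, flatten
# nn.Sequential indices, and split fused qkv projections into query/key/value tensors.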
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    """Convert each pixel's color to its negative, in place."""
    pixel_h, pixel_v = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    neg = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
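# secrets.choice is used instead of random.choice so the generated
# passwords are cryptographically secure.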
def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length from letters, digits and punctuation."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


# ALTERNATIVE METHODS
# chars_incl = characters that must be in the password
# i = desired password length
def alternative_password_generator(chars_incl: str, i: int) -> str:
    # fill the remaining length with letters, digits and punctuation in
    # roughly equal parts, then shuffle
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    # Passwords should contain UPPERCASE, lowercase, numbers, and special characters
    return upper and lower and num and spec_char


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, you better save it.]")


if __name__ == "__main__":
    main()
| 149
| 0
|
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
lowercase_ = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class A ( _UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Optional[Any],*lowercase_ : Any,**lowercase_ : Union[str, Any] )-> Any:
'''simple docstring'''
super().__init__(*lowercase_,**lowercase_ )
self.check_model_type(lowercase_ )
def snake_case__ ( self : Dict,lowercase_ : Any=None,lowercase_ : Any=None,lowercase_ : Any=None,**lowercase_ : Optional[int] )-> Union[str, Any]:
'''simple docstring'''
A__ , A__ = {}, {}
if padding is not None:
A__ = padding
if truncation is not None:
A__ = truncation
if top_k is not None:
A__ = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : Dict,lowercase_ : Union["Image.Image", str],lowercase_ : str = None,**lowercase_ : List[Any] )-> Tuple:
'''simple docstring'''
if isinstance(lowercase_,(Image.Image, str) ) and isinstance(lowercase_,lowercase_ ):
A__ = {'image': image, 'question': question}
else:
A__ = image
A__ = super().__call__(lowercase_,**lowercase_ )
return results
def snake_case__ ( self : str,lowercase_ : Any,lowercase_ : int=False,lowercase_ : List[Any]=False )-> Tuple:
'''simple docstring'''
A__ = load_image(inputs['image'] )
A__ = self.tokenizer(
inputs['question'],return_tensors=self.framework,padding=lowercase_,truncation=lowercase_ )
A__ = self.image_processor(images=lowercase_,return_tensors=self.framework )
model_inputs.update(lowercase_ )
return model_inputs
def snake_case__ ( self : Union[str, Any],lowercase_ : Union[str, Any] )-> List[str]:
'''simple docstring'''
A__ = self.model(**lowercase_ )
return model_outputs
def snake_case__ ( self : str,lowercase_ : str,lowercase_ : List[Any]=5 )-> int:
'''simple docstring'''
if top_k > self.model.config.num_labels:
A__ = self.model.config.num_labels
if self.framework == "pt":
A__ = model_outputs.logits.sigmoid()[0]
A__ , A__ = probs.topk(lowercase_ )
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
A__ = scores.tolist()
A__ = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(lowercase_,lowercase_ )]
| 7
|
def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
A__ = 0
A__ = len(SCREAMING_SNAKE_CASE__ ) - 1
while left <= right:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
A__ = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(SCREAMING_SNAKE_CASE__ ):
return None
A__ = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
A__ = left
A__ = point
elif point > right:
A__ = right
A__ = point
else:
if item < current_item:
A__ = point - 1
else:
A__ = point + 1
return None
def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> str:
'''simple docstring'''
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
A__ = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(SCREAMING_SNAKE_CASE__ ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif point > right:
return interpolation_search_by_recursion(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , point - 1 )
else:
return interpolation_search_by_recursion(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , point + 1 , SCREAMING_SNAKE_CASE__ )
def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple ) -> Tuple:
'''simple docstring'''
if collection != sorted(SCREAMING_SNAKE_CASE__ ):
raise ValueError('Collection must be ascending sorted' )
return True
if __name__ == "__main__":
import sys
lowercase_ = 0
if debug == 1:
lowercase_ = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("Sequence must be ascending sorted to apply interpolation search")
lowercase_ = 67
lowercase_ = interpolation_search(collection, target)
if result is not None:
print(f"""{target} found at positions: {result}""")
else:
print("Not found")
| 7
| 1
|
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
__A = logging.getLogger(__name__)
def __a ( lowerCAmelCase_ : str ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_= git.Repo(search_parent_directories=lowerCAmelCase_ )
UpperCAmelCase_= {
"""repo_id""": str(lowerCAmelCase_ ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
}
with open(os.path.join(lowerCAmelCase_ ,"""git_log.json""" ) ,"""w""" ) as f:
json.dump(lowerCAmelCase_ ,lowerCAmelCase_ ,indent=4 )
def __a ( lowerCAmelCase_ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
if params.n_gpu <= 0:
UpperCAmelCase_= 0
UpperCAmelCase_= -1
UpperCAmelCase_= True
UpperCAmelCase_= False
return
assert torch.cuda.is_available()
logger.info("""Initializing GPUs""" )
if params.n_gpu > 1:
assert params.local_rank != -1
UpperCAmelCase_= int(os.environ["""WORLD_SIZE"""] )
UpperCAmelCase_= int(os.environ["""N_GPU_NODE"""] )
UpperCAmelCase_= int(os.environ["""RANK"""] )
# number of nodes / node ID
UpperCAmelCase_= params.world_size // params.n_gpu_per_node
UpperCAmelCase_= params.global_rank // params.n_gpu_per_node
UpperCAmelCase_= True
assert params.n_nodes == int(os.environ["""N_NODES"""] )
assert params.node_id == int(os.environ["""NODE_RANK"""] )
# local job (single GPU)
else:
assert params.local_rank == -1
UpperCAmelCase_= 1
UpperCAmelCase_= 0
UpperCAmelCase_= 0
UpperCAmelCase_= 0
UpperCAmelCase_= 1
UpperCAmelCase_= 1
UpperCAmelCase_= False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
UpperCAmelCase_= params.node_id == 0 and params.local_rank == 0
UpperCAmelCase_= params.n_nodes > 1
# summary
UpperCAmelCase_= F"""--- Global rank: {params.global_rank} - """
logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes )
logger.info(PREFIX + """Node ID : %i""" % params.node_id )
logger.info(PREFIX + """Local rank : %i""" % params.local_rank )
logger.info(PREFIX + """World size : %i""" % params.world_size )
logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node )
logger.info(PREFIX + """Master : %s""" % str(params.is_master ) )
logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) )
logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) )
logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("""Initializing PyTorch distributed""" )
torch.distributed.init_process_group(
init_method="""env://""" ,backend="""nccl""" ,)
def __a ( lowerCAmelCase_ : Dict ) -> Any:
'''simple docstring'''
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 277
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__A = 16
__A = 32
def __a ( lowerCAmelCase_ : Accelerator ,lowerCAmelCase_ : int = 16 ,lowerCAmelCase_ : str = "bert-base-cased" ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_= AutoTokenizer.from_pretrained(lowerCAmelCase_ )
UpperCAmelCase_= load_dataset("""glue""" ,"""mrpc""" )
def tokenize_function(lowerCAmelCase_ : List[Any] ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase_= tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=lowerCAmelCase_ ,max_length=lowerCAmelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCAmelCase_= datasets.map(
lowerCAmelCase_ ,batched=lowerCAmelCase_ ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,load_from_cache_file=lowerCAmelCase_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase_= tokenized_datasets.rename_column("""label""" ,"""labels""" )
def collate_fn(lowerCAmelCase_ : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowerCAmelCase_ ,padding="""max_length""" ,max_length=1_28 ,return_tensors="""pt""" )
return tokenizer.pad(lowerCAmelCase_ ,padding="""longest""" ,return_tensors="""pt""" )
# Instantiate dataloaders.
UpperCAmelCase_= DataLoader(
tokenized_datasets["""train"""] ,shuffle=lowerCAmelCase_ ,collate_fn=lowerCAmelCase_ ,batch_size=lowerCAmelCase_ )
UpperCAmelCase_= DataLoader(
tokenized_datasets["""validation"""] ,shuffle=lowerCAmelCase_ ,collate_fn=lowerCAmelCase_ ,batch_size=lowerCAmelCase_ )
return train_dataloader, eval_dataloader
def __a ( lowerCAmelCase_ : str ,lowerCAmelCase_ : List[str] ) -> int:
'''simple docstring'''
UpperCAmelCase_= Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase_= config["""lr"""]
UpperCAmelCase_= int(config["""num_epochs"""] )
UpperCAmelCase_= int(config["""seed"""] )
UpperCAmelCase_= int(config["""batch_size"""] )
UpperCAmelCase_= args.model_name_or_path
set_seed(lowerCAmelCase_ )
UpperCAmelCase_, UpperCAmelCase_= get_dataloaders(lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase_= AutoModelForSequenceClassification.from_pretrained(lowerCAmelCase_ ,return_dict=lowerCAmelCase_ )
# Instantiate optimizer
UpperCAmelCase_= (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
UpperCAmelCase_= optimizer_cls(params=model.parameters() ,lr=lowerCAmelCase_ )
if accelerator.state.deepspeed_plugin is not None:
UpperCAmelCase_= accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
UpperCAmelCase_= 1
UpperCAmelCase_= (len(lowerCAmelCase_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCAmelCase_= get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase_ ,num_warmup_steps=0 ,num_training_steps=lowerCAmelCase_ ,)
else:
UpperCAmelCase_= DummyScheduler(lowerCAmelCase_ ,total_num_steps=lowerCAmelCase_ ,warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= accelerator.prepare(
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ )
# We need to keep track of how many total steps we have iterated over
UpperCAmelCase_= 0
# We also need to keep track of the stating epoch so files are named properly
UpperCAmelCase_= 0
# Now we train the model
UpperCAmelCase_= evaluate.load("""glue""" ,"""mrpc""" )
UpperCAmelCase_= 0
UpperCAmelCase_= {}
for epoch in range(lowerCAmelCase_ ,lowerCAmelCase_ ):
model.train()
for step, batch in enumerate(lowerCAmelCase_ ):
UpperCAmelCase_= model(**lowerCAmelCase_ )
UpperCAmelCase_= outputs.loss
UpperCAmelCase_= loss / gradient_accumulation_steps
accelerator.backward(lowerCAmelCase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
UpperCAmelCase_= 0
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase_= model(**lowerCAmelCase_ )
UpperCAmelCase_= outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
UpperCAmelCase_, UpperCAmelCase_= accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowerCAmelCase_ ) - 1:
UpperCAmelCase_= predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCAmelCase_= references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowerCAmelCase_ ,references=lowerCAmelCase_ ,)
UpperCAmelCase_= metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" ,lowerCAmelCase_ )
UpperCAmelCase_= eval_metric["""accuracy"""]
if best_performance < eval_metric["accuracy"]:
UpperCAmelCase_= eval_metric["""accuracy"""]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir ,"""all_results.json""" ) ,"""w""" ) as f:
json.dump(lowerCAmelCase_ ,lowerCAmelCase_ )
def __a ( ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_= argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" ,type=lowerCAmelCase_ ,default="""bert-base-cased""" ,help="""Path to pretrained model or model identifier from huggingface.co/models.""" ,required=lowerCAmelCase_ ,)
parser.add_argument(
"""--output_dir""" ,type=lowerCAmelCase_ ,default=""".""" ,help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" ,)
parser.add_argument(
"""--performance_lower_bound""" ,type=lowerCAmelCase_ ,default=lowerCAmelCase_ ,help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""" ,)
parser.add_argument(
"""--num_epochs""" ,type=lowerCAmelCase_ ,default=3 ,help="""Number of train epochs.""" ,)
UpperCAmelCase_= parser.parse_args()
UpperCAmelCase_= {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(lowerCAmelCase_ ,lowerCAmelCase_ )
if __name__ == "__main__":
main()
| 277
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class lowerCAmelCase__ :
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=5_12 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=10_00 , ):
"""simple docstring"""
lowercase_ : List[Any] = parent
lowercase_ : Dict = batch_size
lowercase_ : Any = seq_length
lowercase_ : Union[str, Any] = is_training
lowercase_ : List[str] = use_input_mask
lowercase_ : Tuple = use_token_type_ids
lowercase_ : str = use_labels
lowercase_ : Optional[int] = vocab_size
lowercase_ : Optional[Any] = hidden_size
lowercase_ : Tuple = num_hidden_layers
lowercase_ : Any = num_attention_heads
lowercase_ : Tuple = intermediate_size
lowercase_ : Union[str, Any] = hidden_act
lowercase_ : Optional[Any] = hidden_dropout_prob
lowercase_ : Optional[Any] = attention_probs_dropout_prob
lowercase_ : Any = max_position_embeddings
lowercase_ : int = type_vocab_size
lowercase_ : int = type_sequence_label_size
lowercase_ : Tuple = initializer_range
lowercase_ : List[str] = num_labels
lowercase_ : Union[str, Any] = num_choices
lowercase_ : Dict = scope
lowercase_ : Optional[Any] = range_bbox
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
lowercase_ : Dict = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowercase_ : Any = bbox[i, j, 3]
lowercase_ : int = bbox[i, j, 1]
lowercase_ : str = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowercase_ : List[str] = bbox[i, j, 2]
lowercase_ : Union[str, Any] = bbox[i, j, 0]
lowercase_ : int = t
lowercase_ : Tuple = tf.convert_to_tensor(__snake_case )
lowercase_ : str = None
if self.use_input_mask:
lowercase_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ : Tuple = None
if self.use_token_type_ids:
lowercase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase_ : List[Any] = None
lowercase_ : Tuple = None
lowercase_ : List[str] = None
if self.use_labels:
lowercase_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
lowercase_ : Dict = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Optional[int] = TFLayoutLMModel(config=__snake_case )
lowercase_ : List[str] = model(__snake_case , __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case )
lowercase_ : List[str] = model(__snake_case , __snake_case , token_type_ids=__snake_case )
lowercase_ : str = model(__snake_case , __snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : List[str] = TFLayoutLMForMaskedLM(config=__snake_case )
lowercase_ : Tuple = model(__snake_case , __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : int = self.num_labels
lowercase_ : Tuple = TFLayoutLMForSequenceClassification(config=__snake_case )
lowercase_ : List[Any] = model(__snake_case , __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : List[str] = self.num_labels
lowercase_ : List[Any] = TFLayoutLMForTokenClassification(config=__snake_case )
lowercase_ : str = model(__snake_case , __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Dict = TFLayoutLMForQuestionAnswering(config=__snake_case )
lowercase_ : Union[str, Any] = model(__snake_case , __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : int = self.prepare_config_and_inputs()
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) : Optional[int] = config_and_inputs
lowercase_ : List[str] = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase__ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
lowerCAmelCase_ = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ = (
{
'''feature-extraction''': TFLayoutLMModel,
'''fill-mask''': TFLayoutLMForMaskedLM,
'''text-classification''': TFLayoutLMForSequenceClassification,
'''token-classification''': TFLayoutLMForTokenClassification,
'''zero-shot''': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = True
lowerCAmelCase_ = 1_0
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Tuple = TFLayoutLMModelTester(self )
lowercase_ : Dict = ConfigTester(self , config_class=__snake_case , hidden_size=37 )
def _snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__snake_case )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__snake_case )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__snake_case )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__snake_case )
@slow
def _snake_case ( self ):
"""simple docstring"""
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : int = TFLayoutLMModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@unittest.skip('''Onnx compliancy broke with TF 2.10''' )
def _snake_case ( self ):
"""simple docstring"""
pass
def snake_case_ ( ):
"""simple docstring"""
lowercase_ : Optional[int] = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] ) # noqa: E231
lowercase_ : Any = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
lowercase_ : Union[str, Any] = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231
lowercase_ : int = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
lowercase_ : Any = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[str] = TFLayoutLMModel.from_pretrained('''microsoft/layoutlm-base-uncased''' )
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : Tuple = prepare_layoutlm_batch_inputs()
# forward pass
lowercase_ : str = model(input_ids=__snake_case , bbox=__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case )
# test the sequence output on [0, :3, :3]
lowercase_ : Dict = tf.convert_to_tensor(
[[0.1_785, -0.1_947, -0.0_425], [-0.3_254, -0.2_807, 0.2_553], [-0.5_391, -0.3_322, 0.3_364]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __snake_case , atol=1E-3 ) )
# test the pooled output on [1, :3]
lowercase_ : Any = tf.convert_to_tensor([-0.6_580, -0.0_214, 0.8_552] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , __snake_case , atol=1E-3 ) )
@slow
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Any = TFLayoutLMForSequenceClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=2 )
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : Tuple = prepare_layoutlm_batch_inputs()
# forward pass
lowercase_ : Optional[Any] = model(
input_ids=__snake_case , bbox=__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
lowercase_ : str = outputs.loss
lowercase_ : Tuple = (2,)
self.assertEqual(loss.shape , __snake_case )
# test the shape of the logits
lowercase_ : Tuple = outputs.logits
lowercase_ : List[str] = (2, 2)
self.assertEqual(logits.shape , __snake_case )
@slow
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[str] = TFLayoutLMForTokenClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=13 )
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : int = prepare_layoutlm_batch_inputs()
# forward pass
lowercase_ : List[Any] = model(
input_ids=__snake_case , bbox=__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
# test the shape of the logits
lowercase_ : Any = outputs.logits
lowercase_ : Optional[int] = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape , __snake_case )
@slow
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Dict = TFLayoutLMForQuestionAnswering.from_pretrained('''microsoft/layoutlm-base-uncased''' )
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : List[Any] = prepare_layoutlm_batch_inputs()
# forward pass
lowercase_ : Any = model(input_ids=__snake_case , bbox=__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case )
# test the shape of the logits
lowercase_ : Any = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape , __snake_case )
self.assertEqual(outputs.end_logits.shape , __snake_case )
| 93
|
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : int ):
# Construct model
if gpta_config_file == "":
a__ = GPTaConfig()
else:
a__ = GPTaConfig.from_json_file(__lowerCAmelCase )
a__ = GPTaModel(__lowerCAmelCase )
# Load weights from numpy
load_tf_weights_in_gpta(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Save pytorch-model
a__ = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
a__ = pytorch_dump_folder_path + '/' + CONFIG_NAME
print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
torch.save(model.state_dict() , __lowerCAmelCase )
print(F'Save configuration file to {pytorch_config_dump_path}' )
with open(__lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
snake_case : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--gpt2_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
snake_case : Any = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 240
| 0
|
'''simple docstring'''
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def lowercase__ ( __UpperCamelCase )-> str:
return "".join(sorted(__UpperCamelCase ) )
def lowercase__ ( __UpperCamelCase )-> list[str]:
return word_by_signature[signature(__UpperCamelCase )]
SCREAMING_SNAKE_CASE__ = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
SCREAMING_SNAKE_CASE__ = sorted({word.strip().lower() for word in data.splitlines()})
SCREAMING_SNAKE_CASE__ = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams))
| 183
|
'''simple docstring'''
from PIL import Image
def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> Image:
def brightness(__UpperCamelCase ) -> float:
return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
return img.point(__UpperCamelCase )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
SCREAMING_SNAKE_CASE__ = change_brightness(img, 1_0_0)
brigt_img.save('image_data/lena_brightness.png', format='png')
| 183
| 1
|
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowercase : Dict = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 1_28,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.01),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class __UpperCamelCase ( unittest.TestCase ):
@classmethod
def __UpperCAmelCase ( cls ):
'''simple docstring'''
__a : int = TOKEN
HfFolder.save_token(__a )
@classmethod
def __UpperCAmelCase ( cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('test-config' , use_auth_token=self._token )
__a : Dict = BertConfig.from_pretrained(f"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__a , getattr(__a , __a ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__a , repo_id='test-config' , push_to_hub=__a , use_auth_token=self._token )
__a : List[str] = BertConfig.from_pretrained(f"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__a , getattr(__a , __a ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
__a : List[Any] = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__a , getattr(__a , __a ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__a , repo_id='valid_org/test-config-org' , push_to_hub=__a , use_auth_token=self._token )
__a : List[Any] = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__a , getattr(__a , __a ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
__a : str = CustomConfig(attribute=42 )
config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
__a : Tuple = AutoConfig.from_pretrained(f"""{USER}/test-dynamic-config""" , trust_remote_code=__a )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
self.assertEqual(new_config.attribute , 42 )
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
__a : Dict = c.n_embd + 1 # int
__a : List[str] = c.resid_pdrop + 1.0 # float
__a : Optional[int] = not c.scale_attn_weights # bool
__a : Union[str, Any] = c.summary_type + 'foo' # str
c.update_from_string(
f"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
self.assertEqual(__a , c.n_embd , 'mismatch for key: n_embd' )
self.assertEqual(__a , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
self.assertEqual(__a , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
self.assertEqual(__a , c.summary_type , 'mismatch for key: summary_type' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = PretrainedConfig()
__a : Dict = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
__a , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
__a : int = [key for key, value in config_common_kwargs.items() if value == getattr(__a , __a )]
if len(__a ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f""" {", ".join(__a )}.""" )
def __UpperCAmelCase ( self ):
'''simple docstring'''
with self.assertRaises(__a ):
# config is in subfolder, the following should not work without specifying the subfolder
__a : List[Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
__a : Optional[int] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
self.assertIsNotNone(__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = mock.Mock()
__a : Dict = 500
__a : Any = {}
__a : int = HTTPError
__a : Tuple = {}
# Download this model to make sure it's in the cache.
__a : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=__a ) as mock_head:
__a : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = AutoConfig.from_pretrained('bert-base-cased' )
__a : Optional[Any] = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__a )
__a : List[str] = 2
json.dump(configuration.to_dict() , open(os.path.join(__a , 'config.4.0.0.json' ) , 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__a : Optional[int] = AutoConfig.from_pretrained(__a )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__a : str = ['config.42.0.0.json']
__a : List[Any] = 768
configuration.save_pretrained(__a )
shutil.move(os.path.join(__a , 'config.4.0.0.json' ) , os.path.join(__a , 'config.42.0.0.json' ) )
__a : List[str] = AutoConfig.from_pretrained(__a )
self.assertEqual(new_configuration.hidden_size , 768 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
__a : Optional[Any] = 'v4.0.0'
__a , __a : Any = new_transformers.models.auto.AutoConfig.from_pretrained(
__a , return_unused_kwargs=__a )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(__a , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
__a : Dict = 'v3.0.0'
__a : str = old_transformers.models.auto.AutoConfig.from_pretrained(__a )
self.assertEqual(old_configuration.hidden_size , 768 )
| 27
|
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__snake_case = True
except (ImportError, ModuleNotFoundError):
__snake_case = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def a ( __a ) -> str:
'''simple docstring'''
re.sub('''<n>''' , '''''' , __a ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__a ) )
| 97
| 0
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__a = logging.get_logger(__name__)
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = ['''audio_values''', '''audio_mask''']
def __init__( self ,_SCREAMING_SNAKE_CASE=2_048 ,_SCREAMING_SNAKE_CASE=1 ,_SCREAMING_SNAKE_CASE=[16, 16] ,_SCREAMING_SNAKE_CASE=128 ,_SCREAMING_SNAKE_CASE=44_100 ,_SCREAMING_SNAKE_CASE=86 ,_SCREAMING_SNAKE_CASE=2_048 ,_SCREAMING_SNAKE_CASE=0.0 ,**_SCREAMING_SNAKE_CASE ,) -> Tuple:
super().__init__(
feature_size=_SCREAMING_SNAKE_CASE ,sampling_rate=_SCREAMING_SNAKE_CASE ,padding_value=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : Optional[int] = spectrogram_length
UpperCAmelCase_ : List[Any] = num_channels
UpperCAmelCase_ : List[Any] = patch_size
UpperCAmelCase_ : Any = feature_size // self.patch_size[1]
UpperCAmelCase_ : Any = n_fft
UpperCAmelCase_ : Tuple = sampling_rate // hop_length_to_sampling_rate
UpperCAmelCase_ : Optional[Any] = sampling_rate
UpperCAmelCase_ : Union[str, Any] = padding_value
UpperCAmelCase_ : Tuple = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 ,num_mel_filters=_SCREAMING_SNAKE_CASE ,min_frequency=0.0 ,max_frequency=2_20_50.0 ,sampling_rate=_SCREAMING_SNAKE_CASE ,norm='''slaney''' ,mel_scale='''slaney''' ,).T
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> np.ndarray:
UpperCAmelCase_ : Union[str, Any] = spectrogram(
_SCREAMING_SNAKE_CASE ,window_function(self.n_fft ,'''hann''' ) ,frame_length=self.n_fft ,hop_length=self.hop_length ,power=2.0 ,mel_filters=self.mel_filters.T ,log_mel='''dB''' ,db_range=80.0 ,)
UpperCAmelCase_ : Union[str, Any] = log_spec[:, :-1]
UpperCAmelCase_ : int = log_spec - 20.0
UpperCAmelCase_ : Tuple = np.clip(log_spec / 40.0 ,-2.0 ,0.0 ) + 1.0
return log_spec
def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = False ,**_SCREAMING_SNAKE_CASE ,) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'''This feature extractor is set to support sampling rate'''
f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
f''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
UpperCAmelCase_ : str = isinstance(_SCREAMING_SNAKE_CASE ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
UpperCAmelCase_ : Optional[int] = is_batched_numpy or (
isinstance(_SCREAMING_SNAKE_CASE ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
UpperCAmelCase_ : str = [np.asarray([speech] ,dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_SCREAMING_SNAKE_CASE ,np.ndarray ):
UpperCAmelCase_ : str = np.asarray(_SCREAMING_SNAKE_CASE ,dtype=np.floataa )
elif isinstance(_SCREAMING_SNAKE_CASE ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCAmelCase_ : str = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCAmelCase_ : Any = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
UpperCAmelCase_ : Any = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] ,_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : List[str] = [np.asarray(_SCREAMING_SNAKE_CASE ,dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
UpperCAmelCase_ : List[Any] = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
UpperCAmelCase_ : Union[str, Any] = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
UpperCAmelCase_ : Tuple = np.array(_SCREAMING_SNAKE_CASE ).astype(np.floataa )
# convert into correct format for padding
UpperCAmelCase_ : int = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
UpperCAmelCase_ : List[Any] = np.ones([len(_SCREAMING_SNAKE_CASE ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
UpperCAmelCase_ : Any = padded_audio_features * self.padding_value
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
UpperCAmelCase_ : List[str] = audio_features[i]
UpperCAmelCase_ : int = feature
# return as BatchFeature
if return_attention_mask:
UpperCAmelCase_ : List[Any] = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask}
else:
UpperCAmelCase_ : List[Any] = {'''audio_values''': padded_audio_features}
UpperCAmelCase_ : Optional[Any] = BatchFeature(data=_SCREAMING_SNAKE_CASE ,tensor_type=_SCREAMING_SNAKE_CASE )
return encoded_inputs
| 235
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__a = {
'configuration_bloom': ['BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BloomConfig', 'BloomOnnxConfig'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['BloomTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST',
'BloomForCausalLM',
'BloomModel',
'BloomPreTrainedModel',
'BloomForSequenceClassification',
'BloomForTokenClassification',
'BloomForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 235
| 1
|
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def lowerCamelCase__ ( A__ : NDArray[floataa] , A__ : NDArray[floataa] , A__ : list[int] , A__ : int , ):
'''simple docstring'''
__lowerCamelCase, __lowerCamelCase = coefficient_matrix.shape
__lowerCamelCase, __lowerCamelCase = constant_matrix.shape
if rowsa != colsa:
__lowerCamelCase = f'Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'
raise ValueError(A__ )
if colsa != 1:
__lowerCamelCase = f'Constant matrix must be nx1 but received {rowsa}x{colsa}'
raise ValueError(A__ )
if rowsa != rowsa:
__lowerCamelCase = (
"""Coefficient and constant matrices dimensions must be nxn and nx1 but """
f'received {rowsa}x{colsa} and {rowsa}x{colsa}'
)
raise ValueError(A__ )
if len(A__ ) != rowsa:
__lowerCamelCase = (
"""Number of initial values must be equal to number of rows in coefficient """
f'matrix but received {len(A__ )} and {rowsa}'
)
raise ValueError(A__ )
if iterations <= 0:
raise ValueError("""Iterations must be at least 1""" )
__lowerCamelCase = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
__lowerCamelCase, __lowerCamelCase = table.shape
strictly_diagonally_dominant(A__ )
# Iterates the whole matrix for given number of times
for _ in range(A__ ):
__lowerCamelCase = []
for row in range(A__ ):
__lowerCamelCase = 0
for col in range(A__ ):
if col == row:
__lowerCamelCase = table[row][col]
elif col == cols - 1:
__lowerCamelCase = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
__lowerCamelCase = (temp + val) / denom
new_val.append(A__ )
__lowerCamelCase = new_val
return [float(A__ ) for i in new_val]
def lowerCamelCase__ ( A__ : NDArray[floataa] ):
'''simple docstring'''
__lowerCamelCase, __lowerCamelCase = table.shape
__lowerCamelCase = True
for i in range(0 , A__ ):
__lowerCamelCase = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("""Coefficient matrix is not strictly diagonally dominant""" )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 12
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Tuple = ShapEImgaImgPipeline
UpperCAmelCase__ : Optional[Any] = ['image']
UpperCAmelCase__ : int = ['image']
UpperCAmelCase__ : Any = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
UpperCAmelCase__ : int = False
@property
def lowerCAmelCase__ ( self: int ):
return 32
@property
def lowerCAmelCase__ ( self: List[str] ):
return 32
@property
def lowerCAmelCase__ ( self: Any ):
return self.time_input_dim * 4
@property
def lowerCAmelCase__ ( self: Dict ):
return 8
@property
def lowerCAmelCase__ ( self: int ):
torch.manual_seed(0 )
__lowerCamelCase = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
__lowerCamelCase = CLIPVisionModel(UpperCamelCase_ )
return model
@property
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = CLIPImageProcessor(
crop_size=2_24 , do_center_crop=UpperCamelCase_ , do_normalize=UpperCamelCase_ , do_resize=UpperCamelCase_ , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_24 , )
return image_processor
@property
def lowerCAmelCase__ ( self: Tuple ):
torch.manual_seed(0 )
__lowerCamelCase = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""embedding_proj_norm_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
__lowerCamelCase = PriorTransformer(**UpperCamelCase_ )
return model
@property
def lowerCAmelCase__ ( self: List[Any] ):
torch.manual_seed(0 )
__lowerCamelCase = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
__lowerCamelCase = ShapERenderer(**UpperCamelCase_ )
return model
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = self.dummy_prior
__lowerCamelCase = self.dummy_image_encoder
__lowerCamelCase = self.dummy_image_processor
__lowerCamelCase = self.dummy_renderer
__lowerCamelCase = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=10_24 , prediction_type="""sample""" , use_karras_sigmas=UpperCamelCase_ , clip_sample=UpperCamelCase_ , clip_sample_range=1.0 , )
__lowerCamelCase = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""image_processor""": image_processor,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def lowerCAmelCase__ ( self: int , UpperCamelCase_: List[Any] , UpperCamelCase_: Dict=0 ):
__lowerCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
if str(UpperCamelCase_ ).startswith("""mps""" ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = {
"""image""": input_image,
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = """cpu"""
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = self.pipeline_class(**UpperCamelCase_ )
__lowerCamelCase = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = pipe(**self.get_dummy_inputs(UpperCamelCase_ ) )
__lowerCamelCase = output.images[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__lowerCamelCase = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self: List[str] ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = torch_device == """cpu"""
__lowerCamelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=UpperCamelCase_ , relax_max_difference=UpperCamelCase_ , )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = self.pipeline_class(**UpperCamelCase_ )
__lowerCamelCase = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = 1
__lowerCamelCase = 2
__lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ )
for key in inputs.keys():
if key in self.batch_params:
__lowerCamelCase = batch_size * [inputs[key]]
__lowerCamelCase = pipe(**UpperCamelCase_ , num_images_per_prompt=UpperCamelCase_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" )
__lowerCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_img2img_out.npy""" )
__lowerCamelCase = ShapEImgaImgPipeline.from_pretrained("""openai/shap-e-img2img""" )
__lowerCamelCase = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
__lowerCamelCase = pipe(
UpperCamelCase_ , generator=UpperCamelCase_ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_ )
| 12
| 1
|
"""simple docstring"""
def dodecahedron_surface_area( edge : float ) ->float:
    '''simple docstring'''
    if not isinstance(edge , (int, float) ) or edge <= 0:
        raise ValueError("Length must be a positive value." )
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def dodecahedron_volume( edge : float ) ->float:
    '''simple docstring'''
    if not isinstance(edge , (int, float) ) or edge <= 0:
        raise ValueError("Length must be a positive value." )
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
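    # Hedged worked example (added): for edge length 2 the closed forms give
    # surface area 3 * sqrt(25 + 10 * sqrt(5)) * 4 ~= 82.58 and
    # volume (15 + 7 * sqrt(5)) / 4 * 8 ~= 61.30.
    print(dodecahedron_surface_area(2 ))  # ~82.58
    print(dodecahedron_volume(2 ))  # ~61.30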
| 79
|
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling( data : dict ) ->tuple:
    '''simple docstring'''
    return (data["data"], data["target"])
def xgboost( features : np.ndarray , target : np.ndarray ) ->XGBClassifier:
    '''simple docstring'''
    classifier = XGBClassifier()
    classifier.fit(features , target )
    return classifier
def main( ) ->None:
    '''simple docstring'''
    iris = load_iris()
    features, targets = data_handling(iris )
    x_train, x_test, y_train, y_test = train_test_split(
        features , targets , test_size=0.25 )
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgb_classifier = xgboost(x_train , y_train )
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgb_classifier , x_test , y_test , display_labels=names , cmap="Blues" , normalize="true" , )
    plt.title("Normalized Confusion Matrix - IRIS Dataset" )
    plt.show()
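# Hedged check (added, illustrative only): `data_handling` simply unpacks the
# sklearn Bunch into (features, targets); for iris the shapes are (150, 4) and (150,).
def _data_handling_demo() -> None:
    features, targets = data_handling(load_iris() )
    assert features.shape == (150, 4)
    assert targets.shape == (150,)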
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 79
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2_0_4_8,
}
class XGLMTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [F'<madeupword{i}>' for i in range(self.num_madeup_words )]
        kwargs['additional_special_tokens'] = kwargs.get('additional_special_tokens' , [] ) or []
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        sp_size = len(self.sp_model )
        madeup_words_ids = {F'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
        self.fairseq_tokens_to_ids.update(madeup_words_ids )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self ) -> Dict:
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d ) -> None:
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens( self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_b is None:
            return [self.sep_token_id] + token_ids_a
        sep = [self.sep_token_id]
        return sep + token_ids_a + sep + sep + token_ids_b
    def get_special_tokens_mask( self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a ))
        return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b ))
    def create_token_type_ids_from_sequences( self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        if token_ids_b is None:
            return len(sep + token_ids_a ) * [0]
        return len(sep + token_ids_a + sep + sep + token_ids_b ) * [0]
    @property
    def vocab_size( self ) -> int:
        '''simple docstring'''
        return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
    def get_vocab( self ) -> Dict:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text: str ) -> List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ) -> int:
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ) -> str:
        '''simple docstring'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ) -> str:
        '''simple docstring'''
        out_string = ''.join(tokens ).replace(SPIECE_UNDERLINE , ' ' ).strip()
        return out_string
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
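# Hedged sketch (added, not part of the tokenizer): how the fairseq offset of 1
# shifts raw SentencePiece ids while the four control tokens keep fixed ids; the
# raw id below is illustrative, matching the alignment table in `__init__`.
def _fairseq_offset_demo() -> None:
    fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
    fairseq_offset = 1
    raw_spm_id = 3  # pretend the SP model assigns ',' the raw id 3
    assert raw_spm_id + fairseq_offset == 4  # ',' lands at fairseq position 4
    assert fairseq_tokens_to_ids['<pad>'] == 1  # control tokens stay pinned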
| 161
|
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}
def extract_user_profile( script )-> dict:
    """simple docstring"""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"' ) : -1] )
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser :
    def __init__( self , username: str ) -> None:
        '''simple docstring'''
        self.url = F'https://www.instagram.com/{username}/'
        self.user_data = self.get_json()
    def get_json( self ) -> dict:
        '''simple docstring'''
        html = requests.get(self.url , headers=headers ).text
        scripts = BeautifulSoup(html , 'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self :Union[str, Any] ) -> str:
'''simple docstring'''
return F'{self.__class__.__name__}(\'{self.username}\')'
def __str__( self :List[Any] ) -> str:
'''simple docstring'''
return F'{self.fullname} ({self.username}) is {self.biography}'
    @property
    def username( self ) -> str:
        '''simple docstring'''
        return self.user_data["username"]
    @property
    def fullname( self ) -> str:
        '''simple docstring'''
        return self.user_data["full_name"]
    @property
    def biography( self ) -> str:
        '''simple docstring'''
        return self.user_data["biography"]
    @property
    def email( self ) -> str:
        '''simple docstring'''
        return self.user_data["business_email"]
    @property
    def website( self ) -> str:
        '''simple docstring'''
        return self.user_data["external_url"]
    @property
    def number_of_followers( self ) -> int:
        '''simple docstring'''
        return self.user_data["edge_followed_by"]["count"]
    @property
    def number_of_followings( self ) -> int:
        '''simple docstring'''
        return self.user_data["edge_follow"]["count"]
    @property
    def number_of_posts( self ) -> int:
        '''simple docstring'''
        return self.user_data["edge_owner_to_timeline_media"]["count"]
    @property
    def profile_picture_url( self ) -> str:
        '''simple docstring'''
        return self.user_data["profile_pic_url_hd"]
    @property
    def is_verified( self ) -> bool:
        '''simple docstring'''
        return self.user_data["is_verified"]
    @property
    def is_private( self ) -> bool:
        '''simple docstring'''
        return self.user_data["is_private"]
def test_instagram_user( username: str = "github" )-> None:
    """simple docstring"""
    import os
    if os.environ.get('CI' ):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username )
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data , dict )
    assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(f'''{instagram_user.number_of_posts = }''')
print(f'''{instagram_user.number_of_followers = }''')
print(f'''{instagram_user.number_of_followings = }''')
print(f'''{instagram_user.email = }''')
print(f'''{instagram_user.website = }''')
print(f'''{instagram_user.profile_picture_url = }''')
print(f'''{instagram_user.is_verified = }''')
print(f'''{instagram_user.is_private = }''')
| 161
| 1
|
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class lowercase_ :
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return self.get_dummy_input()
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
if self.block_type == "down":
return (4, 3_2, 1_6, 1_6)
elif self.block_type == "mid":
return (4, 3_2, 3_2, 3_2)
elif self.block_type == "up":
return (4, 3_2, 6_4, 6_4)
raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def lowerCamelCase_ ( self , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=False , ):
"""simple docstring"""
UpperCamelCase_ = 4
UpperCamelCase_ = 3_2
UpperCamelCase_ = (3_2, 3_2)
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = torch.device(__UpperCamelCase )
UpperCamelCase_ = (batch_size, num_channels) + sizes
UpperCamelCase_ = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=__UpperCamelCase )
UpperCamelCase_ = {"""hidden_states""": hidden_states}
if include_temb:
UpperCamelCase_ = 1_2_8
UpperCamelCase_ = randn_tensor((batch_size, temb_channels) , generator=__UpperCamelCase , device=__UpperCamelCase )
if include_res_hidden_states_tuple:
UpperCamelCase_ = torch.manual_seed(1 )
UpperCamelCase_ = (randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=__UpperCamelCase ),)
if include_encoder_hidden_states:
UpperCamelCase_ = floats_tensor((batch_size, 3_2, 3_2) ).to(__UpperCamelCase )
if include_skip_sample:
UpperCamelCase_ = randn_tensor(((batch_size, 3) + sizes) , generator=__UpperCamelCase , device=__UpperCamelCase )
return dummy_input
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = {
"""in_channels""": 3_2,
"""out_channels""": 3_2,
"""temb_channels""": 1_2_8,
}
if self.block_type == "up":
UpperCamelCase_ = 3_2
if self.block_type == "mid":
init_dict.pop("""out_channels""" )
UpperCamelCase_ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ = self.prepare_init_args_and_inputs_for_common()
UpperCamelCase_ = self.block_class(**__UpperCamelCase )
unet_block.to(__UpperCamelCase )
unet_block.eval()
with torch.no_grad():
UpperCamelCase_ = unet_block(**__UpperCamelCase )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCamelCase_ = output[0]
self.assertEqual(output.shape , self.output_shape )
UpperCamelCase_ = output[0, -1, -3:, -3:]
UpperCamelCase_ = torch.tensor(__UpperCamelCase ).to(__UpperCamelCase )
assert torch_all_close(output_slice.flatten() , __UpperCamelCase , atol=5e-3 )
@unittest.skipIf(torch_device == """mps""" , """Training is not supported in mps""" )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ = self.prepare_init_args_and_inputs_for_common()
UpperCamelCase_ = self.block_class(**__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
UpperCamelCase_ = model(**__UpperCamelCase )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCamelCase_ = output[0]
UpperCamelCase_ = torch.device(__UpperCamelCase )
UpperCamelCase_ = randn_tensor(output.shape , device=__UpperCamelCase )
UpperCamelCase_ = torch.nn.functional.mse_loss(__UpperCamelCase , __UpperCamelCase )
loss.backward()
| 261
|
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize( example ):
    output = {}
    output["input_ids"] = tokenizer(example["""content"""] , truncation=False )["""input_ids"""]
    output["ratio_char_token"] = len(example["""content"""] ) / len(output["""input_ids"""] )
    return output
_A = HfArgumentParser(PretokenizationArguments)
_A = parser.parse_args()
if args.num_workers is None:
_A = multiprocessing.cpu_count()
_A = AutoTokenizer.from_pretrained(args.tokenizer_dir)
_A = time.time()
_A = load_dataset(args.dataset_name, split='''train''')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
_A = time.time()
_A = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
_A = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
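# Hedged illustration (added): the chars-per-token ratio computed by `tokenize`,
# shown with a whitespace split standing in for the trained tokenizer.
def _ratio_demo() -> float:
    example_content = "def add(a, b):\n    return a + b\n"
    toy_input_ids = example_content.split()  # stand-in for tokenizer(...)["input_ids"]
    return len(example_content) / len(toy_input_ids)  # higher means denser tokens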
| 261
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
lowercase_ : Tuple =KandinskyVaaControlnetPipeline
lowercase_ : Dict =['''image_embeds''', '''negative_image_embeds''', '''hint''']
lowercase_ : str =['''image_embeds''', '''negative_image_embeds''', '''hint''']
lowercase_ : Dict =[
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
lowercase_ : str =False
@property
def A__ ( self):
return 3_2
@property
def A__ ( self):
return 3_2
@property
def A__ ( self):
return self.time_input_dim
@property
def A__ ( self):
return self.time_input_dim * 4
@property
def A__ ( self):
return 1_0_0
@property
def A__ ( self):
torch.manual_seed(0)
lowercase = {
'''in_channels''': 8,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
lowercase = UNetaDConditionModel(**A__)
return model
@property
def A__ ( self):
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def A__ ( self):
torch.manual_seed(0)
lowercase = VQModel(**self.dummy_movq_kwargs)
return model
def A__ ( self):
lowercase = self.dummy_unet
lowercase = self.dummy_movq
lowercase = DDIMScheduler(
num_train_timesteps=1_0_0_0 ,beta_schedule='''linear''' ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=A__ ,set_alpha_to_one=A__ ,steps_offset=1 ,prediction_type='''epsilon''' ,thresholding=A__ ,)
lowercase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def A__ ( self ,A__ ,A__=0):
lowercase = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(A__)).to(A__)
lowercase = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1)).to(
A__)
# create hint
lowercase = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(A__)).to(A__)
if str(A__).startswith('''mps'''):
lowercase = torch.manual_seed(A__)
else:
lowercase = torch.Generator(device=A__).manual_seed(A__)
lowercase = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def A__ ( self):
lowercase = '''cpu'''
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**A__)
lowercase = pipe.to(A__)
pipe.set_progress_bar_config(disable=A__)
lowercase = pipe(**self.get_dummy_inputs(A__))
lowercase = output.images
lowercase = pipe(
**self.get_dummy_inputs(A__) ,return_dict=A__ ,)[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowercase = np.array(
[0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def A__ ( self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self):
lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy''')
lowercase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''')
lowercase = torch.from_numpy(np.array(A__)).float() / 255.0
lowercase = hint.permute(2 ,0 ,1).unsqueeze(0)
lowercase = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' ,torch_dtype=torch.floataa)
pipe_prior.to(A__)
lowercase = KandinskyVaaControlnetPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' ,torch_dtype=torch.floataa)
lowercase = pipeline.to(A__)
pipeline.set_progress_bar_config(disable=A__)
lowercase = '''A robot, 4k photo'''
lowercase = torch.Generator(device='''cuda''').manual_seed(0)
lowercase , lowercase = pipe_prior(
A__ ,generator=A__ ,num_inference_steps=5 ,negative_prompt='''''' ,).to_tuple()
lowercase = torch.Generator(device='''cuda''').manual_seed(0)
lowercase = pipeline(
image_embeds=A__ ,negative_image_embeds=A__ ,hint=A__ ,generator=A__ ,num_inference_steps=1_0_0 ,output_type='''np''' ,)
lowercase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(A__ ,A__)
| 101
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ :str = logging.get_logger(__name__)
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = '''huggingface/label-files'''
lowercase = '''imagenet-1k-id2label.json'''
lowercase = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type='''dataset''' ) , '''r''' ) )
lowercase = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
lowercase = {v: k for k, v in idalabel.items()}
lowercase = '''std_conv''' if '''bit''' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
lowercase = BitConfig(
conv_layer=lowerCAmelCase__ , num_labels=1000 , idalabel=lowerCAmelCase__ , labelaid=lowerCAmelCase__ , )
return config
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
if "stem.conv" in name:
lowercase = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
if "blocks" in name:
lowercase = name.replace('''blocks''' , '''layers''' )
if "head.fc" in name:
lowercase = name.replace('''head.fc''' , '''classifier.1''' )
if name.startswith('''norm''' ):
lowercase = '''bit.''' + name
if "bit" not in name and "classifier" not in name:
lowercase = '''bit.encoder.''' + name
return name
def UpperCamelCase ( ):
'''simple docstring'''
lowercase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return im
@torch.no_grad()
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ):
'''simple docstring'''
lowercase = get_config(lowerCAmelCase__ )
# load original model from timm
lowercase = create_model(lowerCAmelCase__ , pretrained=lowerCAmelCase__ )
timm_model.eval()
# load state_dict of original model
lowercase = timm_model.state_dict()
for key in state_dict.copy().keys():
lowercase = state_dict.pop(lowerCAmelCase__ )
lowercase = val.squeeze() if '''head''' in key else val
# load HuggingFace model
lowercase = BitForImageClassification(lowerCAmelCase__ )
model.eval()
model.load_state_dict(lowerCAmelCase__ )
# create image processor
lowercase = create_transform(**resolve_data_config({} , model=lowerCAmelCase__ ) )
lowercase = transform.transforms
lowercase = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
lowercase = BitImageProcessor(
do_resize=lowerCAmelCase__ , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=lowerCAmelCase__ , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=lowerCAmelCase__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
lowercase = prepare_img()
lowercase = transform(lowerCAmelCase__ ).unsqueeze(0 )
lowercase = processor(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ )
# verify logits
with torch.no_grad():
lowercase = model(lowerCAmelCase__ )
lowercase = outputs.logits
print('''Logits:''' , logits[0, :3] )
print('''Predicted class:''' , model.config.idalabel[logits.argmax(-1 ).item()] )
lowercase = timm_model(lowerCAmelCase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCAmelCase__ , outputs.logits , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
print(f'Saving model {model_name} and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print(f'Pushing model {model_name} and processor to the hub' )
model.push_to_hub(f'ybelkada/{model_name}' )
processor.push_to_hub(f'ybelkada/{model_name}' )
if __name__ == "__main__":
lowercase__ :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="resnetv2_50x1_bitm",
type=str,
help="Name of the BiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub.",
)
lowercase__ :List[str] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 101
| 1
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def euler_modified( ode_func : Callable , ya : float , xa : float , step_size : float , x_end : float ) -> np.array:
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        y_get = y[k] + step_size * ode_func(x , y[k] )
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_get ))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
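    # Hedged worked example (added): dy/dx = y with y(0) = 1 integrated to x = 1
    # by the Heun scheme above; the final value should approach e ~= 2.71828.
    print(euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)[-1])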
| 346
|
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
lowerCAmelCase_ : Optional[int] = True
except (ImportError, ModuleNotFoundError):
lowerCAmelCase_ : Tuple = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def _lowerCamelCase ( lowercase : str ) -> str:
    lowercase = re.sub("<n>" , "" , lowercase )  # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(lowercase ) )
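# Hedged usage sketch (added): newline-joined sentences let rougeLsum score per
# sentence; requires the punkt data downloaded above.
def _sentence_split_demo() -> None:
    joined = "\n".join(nltk.sent_tokenize("First one. Second one." ) )
    assert joined == "First one.\nSecond one."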
| 346
| 1
|
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class a__ ( a_ ):
__lowerCAmelCase = (DDPMScheduler,)
def __magic_name__ ( self , **_a ):
lowercase : str = {
"num_train_timesteps": 1_000,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**_a )
return config
def __magic_name__ ( self ):
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_a )
def __magic_name__ ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=_a , beta_end=_a )
def __magic_name__ ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_a )
def __magic_name__ ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_a )
def __magic_name__ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_a )
def __magic_name__ ( self ):
self.check_over_configs(thresholding=_a )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_a , prediction_type=_a , sample_max_value=_a , )
def __magic_name__ ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
def __magic_name__ ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=_a )
def __magic_name__ ( self ):
lowercase : Union[str, Any] = self.scheduler_classes[0]
lowercase : Optional[Any] = self.get_scheduler_config()
lowercase : str = scheduler_class(**_a )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1E-5
def __magic_name__ ( self ):
lowercase : Optional[int] = self.scheduler_classes[0]
lowercase : Any = self.get_scheduler_config()
lowercase : List[str] = scheduler_class(**_a )
lowercase : Optional[int] = len(_a )
lowercase : List[str] = self.dummy_model()
lowercase : Any = self.dummy_sample_deter
lowercase : str = torch.manual_seed(0 )
for t in reversed(range(_a ) ):
# 1. predict noise residual
lowercase : int = model(_a , _a )
# 2. predict previous mean of sample x_t-1
lowercase : Optional[int] = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowercase : Dict = pred_prev_sample
lowercase : Any = torch.sum(torch.abs(_a ) )
lowercase : Any = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1E-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1E-3
def __magic_name__ ( self ):
lowercase : int = self.scheduler_classes[0]
lowercase : Dict = self.get_scheduler_config(prediction_type="v_prediction" )
lowercase : Dict = scheduler_class(**_a )
lowercase : Optional[int] = len(_a )
lowercase : List[str] = self.dummy_model()
lowercase : List[Any] = self.dummy_sample_deter
lowercase : int = torch.manual_seed(0 )
for t in reversed(range(_a ) ):
# 1. predict noise residual
lowercase : int = model(_a , _a )
# 2. predict previous mean of sample x_t-1
lowercase : Any = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowercase : Tuple = pred_prev_sample
lowercase : List[str] = torch.sum(torch.abs(_a ) )
lowercase : List[Any] = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1E-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1E-3
def __magic_name__ ( self ):
lowercase : List[Any] = self.scheduler_classes[0]
lowercase : List[Any] = self.get_scheduler_config()
lowercase : Optional[Any] = scheduler_class(**_a )
lowercase : Any = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_a )
lowercase : Union[str, Any] = scheduler.timesteps
for i, timestep in enumerate(_a ):
if i == len(_a ) - 1:
lowercase : Dict = -1
else:
lowercase : List[str] = timesteps[i + 1]
lowercase : List[Any] = scheduler.previous_timestep(_a )
lowercase : Tuple = prev_t.item()
self.assertEqual(_a , _a )
def __magic_name__ ( self ):
lowercase : int = self.scheduler_classes[0]
lowercase : List[str] = self.get_scheduler_config()
lowercase : List[Any] = scheduler_class(**_a )
lowercase : Union[str, Any] = [100, 87, 50, 51, 0]
with self.assertRaises(_a , msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=_a )
def __magic_name__ ( self ):
lowercase : int = self.scheduler_classes[0]
lowercase : Optional[int] = self.get_scheduler_config()
lowercase : str = scheduler_class(**_a )
lowercase : Optional[Any] = [100, 87, 50, 1, 0]
lowercase : List[Any] = len(_a )
with self.assertRaises(_a , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=_a , timesteps=_a )
def __magic_name__ ( self ):
lowercase : List[str] = self.scheduler_classes[0]
lowercase : List[str] = self.get_scheduler_config()
lowercase : List[Any] = scheduler_class(**_a )
lowercase : Optional[int] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _a , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=_a )
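# Hedged sketch (added): mirrors what the custom-timesteps test above checks --
# each entry's previous step is the next list element, and the last maps to -1.
def _previous_timestep_demo() -> None:
    custom = [100, 87, 50, 1, 0]
    previous = [custom[i + 1] if i < len(custom) - 1 else -1 for i in range(len(custom))]
    assert previous == [87, 50, 1, 0, -1]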
| 202
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
_A : List[Any] = logging.get_logger(__name__)
class a__ ( a_ ):
__lowerCAmelCase = ["""pixel_values"""]
def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = True , _a = 1 / 255 , _a = True , _a = None , _a = True , **_a , ):
super().__init__(**_a )
lowercase : Optional[Any] = size if size is not None else {"shortest_edge": 224}
lowercase : List[Any] = get_size_dict(_a , default_to_square=_a )
lowercase : str = crop_size if crop_size is not None else {"height": 256, "width": 256}
lowercase : List[str] = get_size_dict(_a , param_name="crop_size" )
lowercase : int = do_resize
lowercase : Optional[int] = size
lowercase : str = resample
lowercase : List[Any] = do_rescale
lowercase : Union[str, Any] = rescale_factor
lowercase : Optional[int] = do_center_crop
lowercase : Union[str, Any] = crop_size
lowercase : Optional[Any] = do_flip_channel_order
def __magic_name__ ( self , _a , _a , _a = PIL.Image.BILINEAR , _a = None , **_a , ):
lowercase : List[Any] = get_size_dict(_a , default_to_square=_a )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
lowercase : Union[str, Any] = get_resize_output_image_size(_a , size=size["shortest_edge"] , default_to_square=_a )
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def __magic_name__ ( self , _a , _a , _a = None , **_a , ):
lowercase : str = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(_a , size=(size["height"], size["width"]) , data_format=_a , **_a )
def __magic_name__ ( self , _a , _a , _a = None , **_a , ):
return rescale(_a , scale=_a , data_format=_a , **_a )
def __magic_name__ ( self , _a , _a = None ):
return flip_channel_order(_a , data_format=_a )
def __magic_name__ ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ):
lowercase : Optional[Any] = do_resize if do_resize is not None else self.do_resize
lowercase : Tuple = resample if resample is not None else self.resample
lowercase : List[str] = do_rescale if do_rescale is not None else self.do_rescale
lowercase : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase : Optional[int] = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
lowercase : str = size if size is not None else self.size
lowercase : Any = get_size_dict(_a , default_to_square=_a )
lowercase : int = crop_size if crop_size is not None else self.crop_size
lowercase : Any = get_size_dict(_a , param_name="crop_size" )
lowercase : int = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
# All transformations expect numpy arrays.
lowercase : Any = [to_numpy_array(_a ) for image in images]
if do_resize:
lowercase : Optional[int] = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
if do_center_crop:
lowercase : str = [self.center_crop(image=_a , size=_a ) for image in images]
if do_rescale:
lowercase : Union[str, Any] = [self.rescale(image=_a , scale=_a ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
lowercase : int = [self.flip_channel_order(image=_a ) for image in images]
lowercase : int = [to_channel_dimension_format(_a , _a ) for image in images]
lowercase : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=_a , tensor_type=_a )
def __magic_name__ ( self , _a , _a = None ):
lowercase : Optional[int] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_a ) != len(_a ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(_a ):
lowercase : Tuple = target_sizes.numpy()
lowercase : List[Any] = []
for idx in range(len(_a ) ):
lowercase : Dict = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=_a )
lowercase : Tuple = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_a )
else:
lowercase : str = logits.argmax(dim=1 )
lowercase : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 202
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_instructblip'] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 86
|
"""simple docstring"""
from __future__ import annotations
import bisect
def bisect_left( sorted_collection :list[int] , item :int , lo :int = 0 , hi :int = -1 ):
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right( sorted_collection :list[int] , item :int , lo :int = 0 , hi :int = -1 ):
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def insort_left( sorted_collection :list[int] , item :int , lo :int = 0 , hi :int = -1 ):
    sorted_collection.insert(bisect_left(sorted_collection , item , lo , hi ) , item )
def insort_right( sorted_collection :list[int] , item :int , lo :int = 0 , hi :int = -1 ):
    sorted_collection.insert(bisect_right(sorted_collection , item , lo , hi ) , item )
def binary_search( sorted_collection :list[int] , item :int ):
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib( sorted_collection :list[int] , item :int ):
    index = bisect.bisect_left(sorted_collection , item )
    if index != len(sorted_collection ) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion( sorted_collection :list[int] , item :int , left :int , right :int ):
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection , item , left , midpoint - 1 )
    else:
        return binary_search_by_recursion(sorted_collection , item , midpoint + 1 , right )
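# Hedged usage note (added): on a list with duplicates, bisect_left returns the
# first admissible slot and bisect_right the slot just past the run of equal items.
def _bisect_demo() -> None:
    data = [1, 2, 2, 2, 3]
    assert bisect_left(data , 2 ) == 1
    assert bisect_right(data , 2 ) == 4
    assert binary_search(data , 3 ) == 4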
if __name__ == "__main__":
    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = sorted(int(item) for item in user_input.split(','))
    target = int(input('Enter a single number to be found in the list:\n'))
    result = binary_search(collection, target)
if result is None:
print(f"""{target} was not found in {collection}.""")
else:
print(f"""{target} was found at position {result} in {collection}.""")
| 86
| 1
|
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def UpperCAmelCase ( a_ , a_ , a_ ) -> str:
"""simple docstring"""
A_ : Dict = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
A_ : Dict = (
("layer.", "layer_"),
("word_embeddings.weight", "word_embeddings"),
("position_embeddings.weight", "position_embeddings"),
("token_type_embeddings.weight", "token_type_embeddings"),
(".", "/"),
("LayerNorm/weight", "LayerNorm/gamma"),
("LayerNorm/bias", "LayerNorm/beta"),
("weight", "kernel"),
)
if not os.path.isdir(A_ ):
os.makedirs(A_ )
A_ : Optional[Any] = model.state_dict()
def to_tf_var_name(a_ ):
for patt, repl in iter(A_ ):
A_ : Optional[Any] = name.replace(A_ , A_ )
return F"bert/{name}"
def create_tf_var(a_ , a_ , a_ ):
A_ : Any = tf.dtypes.as_dtype(tensor.dtype )
A_ : int = tf.get_variable(dtype=A_ , shape=tensor.shape , name=A_ , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(A_ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
A_ : List[Any] = to_tf_var_name(A_ )
A_ : List[str] = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
A_ : List[Any] = torch_tensor.T
A_ : int = create_tf_var(tensor=A_ , name=A_ , session=A_ )
tf.keras.backend.set_value(A_ , A_ )
A_ : Optional[Any] = session.run(A_ )
print(F"Successfully created {tf_name}: {np.allclose(A_ , A_ )}" )
A_ : Tuple = tf.train.Saver(tf.trainable_variables() )
saver.save(A_ , os.path.join(A_ , model_name.replace("""-""" , """_""" ) + """.ckpt""" ) )
def UpperCAmelCase ( a_=None ) -> List[str]:
"""simple docstring"""
A_ : Tuple = argparse.ArgumentParser()
parser.add_argument("""--model_name""" , type=A_ , required=A_ , help="""model name e.g. bert-base-uncased""" )
parser.add_argument(
"""--cache_dir""" , type=A_ , default=A_ , required=A_ , help="""Directory containing pytorch model""" )
parser.add_argument("""--pytorch_model_path""" , type=A_ , required=A_ , help="""/path/to/<pytorch-model-name>.bin""" )
parser.add_argument("""--tf_cache_dir""" , type=A_ , required=A_ , help="""Directory in which to save tensorflow model""" )
A_ : Any = parser.parse_args(A_ )
A_ : List[Any] = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=A_ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 344
|
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
A__: Optional[int] = {
'''debug''': logging.DEBUG,
'''info''': logging.INFO,
'''warning''': logging.WARNING,
'''error''': logging.ERROR,
'''critical''': logging.CRITICAL,
}
A__: int = logging.WARNING
def lowerCAmelCase_ ( ):
UpperCamelCase__: Optional[int] = os.getenv("DATASETS_VERBOSITY" ,A_)
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F"Unknown option DATASETS_VERBOSITY={env_level_str}, "
F"has to be one of: { ', '.join(log_levels.keys()) }")
return _default_log_level
def lowerCAmelCase_ ( ):
return __name__.split(".")[0]
def lowerCAmelCase_ ( ):
return logging.getLogger(_get_library_name())
def lowerCAmelCase_ ( ):
# Apply our default configuration to the library root logger.
UpperCamelCase__: Tuple = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level())
def lowerCAmelCase_ ( ):
UpperCamelCase__: Tuple = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET)
def lowerCAmelCase_ ( A_ = None):
if name is None:
UpperCamelCase__: Optional[Any] = _get_library_name()
return logging.getLogger(A_)
def lowerCAmelCase_ ( ):
return _get_library_root_logger().getEffectiveLevel()
def lowerCAmelCase_ ( A_):
_get_library_root_logger().setLevel(A_)
def lowerCAmelCase_ ( ):
return set_verbosity(A_)
def lowerCAmelCase_ ( ):
return set_verbosity(A_)
def lowerCAmelCase_ ( ):
return set_verbosity(A_)
def lowerCAmelCase_ ( ):
return set_verbosity(A_)
def lowerCAmelCase_ ( ):
UpperCamelCase__: List[Any] = False
def lowerCAmelCase_ ( ):
UpperCamelCase__: List[str] = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class _a :
"""simple docstring"""
def __init__( self: int , *__lowerCamelCase: Tuple , **__lowerCamelCase: str ): # pylint: disable=unused-argument
'''simple docstring'''
UpperCamelCase__: int = args[0] if args else None
def __iter__( self: Optional[Any] ):
'''simple docstring'''
return iter(self._iterator )
def __getattr__( self: Dict , __lowerCamelCase: Any ):
'''simple docstring'''
def empty_fn(*__lowerCamelCase: Any , **__lowerCamelCase: Optional[Any] ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self: str ):
'''simple docstring'''
return self
def __exit__( self: Tuple , __lowerCamelCase: List[str] , __lowerCamelCase: Any , __lowerCamelCase: List[Any] ):
'''simple docstring'''
return
A__: Tuple = True
class _a :
"""simple docstring"""
def __call__( self: Any , *__lowerCamelCase: List[str] , __lowerCamelCase: List[Any]=False , **__lowerCamelCase: Union[str, Any] ):
'''simple docstring'''
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*__lowerCamelCase , **__lowerCamelCase )
else:
return EmptyTqdm(*__lowerCamelCase , **__lowerCamelCase )
def UpperCAmelCase_ ( self: List[str] , *__lowerCamelCase: List[str] , **__lowerCamelCase: Tuple ):
'''simple docstring'''
UpperCamelCase__: Optional[int] = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*__lowerCamelCase , **__lowerCamelCase )
def UpperCAmelCase_ ( self: int ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
A__: Optional[Any] = _tqdm_cls()
def lowerCAmelCase_ ( ):
global _tqdm_active
return bool(_tqdm_active)
def lowerCAmelCase_ ( ):
global _tqdm_active
UpperCamelCase__: int = True
def lowerCAmelCase_ ( ):
global _tqdm_active
UpperCamelCase__: str = False
| 149
| 0
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__UpperCamelCase : int = logging.get_logger(__name__)
__UpperCamelCase : Optional[int] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
__UpperCamelCase : Tuple = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
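
# Hedged invocation sketch: the checkpoint and dict filenames below are
# illustrative placeholders, not paths taken from this script.
#
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path /path/to/unispeech.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./unispeech-hf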
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves the multi-process interleaved print problem"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group('nccl')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(f"""{gpu} is broken""")
raise
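
# If the barrier/all_reduce above hangs, a minimal way to bisect the cluster is
# to reduce over a 2-rank subgroup (sketch; reuses `device`, `rank`, and the
# process group initialized above):
#
#   group = dist.new_group(ranks=[0, 1])
#   if rank in (0, 1):
#       dist.all_reduce(torch.ones(1).to(device), group=group)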
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum of the first n
    natural numbers and the sum of their squares.

    Note: (n * (n + 1) // 2) ** 2 is both the square of the sum and, by
    Nicomachus's theorem, the sum of the first n cubes, hence the name below.
    """
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
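
# Sanity check of the closed forms for n = 10:
#   square of the sum:  (10 * 11 // 2) ** 2 = 55 ** 2 = 3025
#   sum of the squares:  10 * 11 * 21 // 6  = 385
#   difference:          3025 - 385         = 2640
assert solution(10) == 2640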
if __name__ == "__main__":
print(f"""{solution() = }""")
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
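
# With the _LazyModule registration above, importing this package stays cheap:
# the torch-backed modeling classes are only resolved on first attribute
# access, e.g. (illustrative):
#
#   from transformers.models.bigbird_pegasus import BigBirdPegasusModel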
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)
    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever
    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )
    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, _ = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)
    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
import os
import pytest
from attr import dataclass
__SCREAMING_SNAKE_CASE : int = 'us-east-1' # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}
    @property
    def metric_definitions(self):
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transformers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
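
# Hedged usage sketch for the fixture above: a test class opts in via
# `usefixtures` and reads `self.env` set on the class by the fixture
# (the class name and test body below are illustrative, not from this file):
#
#   @pytest.mark.usefixtures("sm_env")
#   class TestSingleNodeTraining:
#       framework = "pytorch"
#
#       def test_metric_definitions(self):
#           assert self.env.metric_definitions[0]["Name"] == "train_runtime"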
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizer(PreTrainedTokenizer):
    """
    CamemBERT tokenizer, based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
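
# Hedged usage sketch (the sample sentence is illustrative; "camembert-base"
# is the checkpoint this file's pretrained maps point at):
#
#   tok = CamembertTokenizer.from_pretrained("camembert-base")
#   ids = tok("J'aime le camembert !")["input_ids"]
#   assert ids[0] == tok.cls_token_id and ids[-1] == tok.sep_token_id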
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version('''nltk'''))
if NLTK_VERSION >= version.Version('''3.6.4'''):
from nltk import word_tokenize
_CITATION = '''\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
'''
_DESCRIPTION = '''\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
'''
_KWARGS_DESCRIPTION = '''
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
\'meteor\': meteor score.
Examples:
>>> meteor = datasets.load_metric(\'meteor\')
>>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
>>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results["meteor"], 4))
0.6944
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
    def _download_and_prepare(self, dl_manager):
import nltk
nltk.download('wordnet' )
if NLTK_VERSION >= version.Version('3.6.5' ):
nltk.download('punkt' )
if NLTK_VERSION >= version.Version('3.6.6' ):
nltk.download('omw-1.4' )
    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version('3.6.5'):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig):
    """Configuration for a composite vision encoder / text decoder model."""

    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True
    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})
class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs
class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def outputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
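
# A minimal composition sketch using the classes above (the "vit"/"bert"
# model types are illustrative choices, not mandated by this file):
#
#   enc = AutoConfig.for_model("vit")
#   dec = AutoConfig.for_model("bert")
#   cfg = VisionEncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)
#   assert cfg.decoder.is_decoder and cfg.decoder.add_cross_attention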
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    r"""Configuration class for the ViViT video transformer model."""

    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
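
# Quick check of the default geometry above (illustrative arithmetic, not part
# of the original config): [2, 16, 16] tubelets over a 32-frame 224x224 clip
# give (32 // 2) * (224 // 16) ** 2 = 16 * 196 = 3136 patch tokens.
#
#   cfg = VivitConfig()
#   tokens = (cfg.num_frames // cfg.tubelet_size[0]) * (cfg.image_size // cfg.tubelet_size[1]) ** 2
#   assert tokens == 3136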
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)

    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))

    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
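
# Hedged invocation sketch (the script filename and output directory below are
# illustrative placeholders):
#
#   python convert_blip_original_pytorch_to_hf.py --pytorch_dump_folder_path ./blip-hf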
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_mmbt''': ['''MMBTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mmbt'''] = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
from collections.abc import Iterable
from typing import Generic, Optional, TypeVar

_T = TypeVar("_T")
class QueueByTwoStacks(Generic[_T]):
    """FIFO queue built from two LIFO stacks, with amortized O(1) operations."""

    def __init__(self, iterable: Optional[Iterable[_T]] = None) -> None:
        self._stack_in: list[_T] = list(iterable or [])
        self._stack_out: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack_in) + len(self._stack_out)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack_out[::-1] + self._stack_in)})"

    def put(self, item: _T) -> None:
        self._stack_in.append(item)

    def get(self) -> _T:
        # Move elements from the input stack only when the output stack is
        # empty, so every element is transferred at most once.
        stack_in_pop = self._stack_in.pop
        stack_out_append = self._stack_out.append

        if not self._stack_out:
            while self._stack_in:
                stack_out_append(stack_in_pop())

        if not self._stack_out:
            raise IndexError("Queue is empty")

        return self._stack_out.pop()
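
# FIFO usage sketch for the class above: puts land on the input stack, gets
# drain the output stack, and arrival order survives the direction flip.
#
#   q = QueueByTwoStacks([1, 2, 3])
#   q.put(4)
#   assert [q.get(), q.get(), q.get(), q.get()] == [1, 2, 3, 4]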
if __name__ == "__main__":
from doctest import testmod
testmod()
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
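
# Sanity sketch for the hidden_size derivation above: embed_dim doubles at each
# of the len(depths) - 1 stage transitions, so the defaults give 96 * 2 ** 3 = 768.
#
#   assert DonutSwinConfig().hidden_size == 768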
"""simple docstring"""
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
def __SCREAMING_SNAKE_CASE ( *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(lowercase__ , ["""torch"""] )
def __SCREAMING_SNAKE_CASE ( *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(lowercase__ , ["""torch"""] )
def __SCREAMING_SNAKE_CASE ( *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(lowercase__ , ["""torch"""] )
def __SCREAMING_SNAKE_CASE ( *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(lowercase__ , ["""torch"""] )
def __SCREAMING_SNAKE_CASE ( *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(lowercase__ , ["""torch"""] )
def __SCREAMING_SNAKE_CASE ( *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(lowercase__ , ["""torch"""] )
def __SCREAMING_SNAKE_CASE ( *__UpperCAmelCase , **__UpperCAmelCase ):
requires_backends(lowercase__ , ["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
class UpperCamelCase ( metaclass=_A ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = ["torch"]
def __init__( self ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(self ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
@classmethod
def lowerCamelCase__ ( cls ,*UpperCAmelCase_ ,**UpperCAmelCase_ ):
requires_backends(cls ,["""torch"""] )
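# A minimal sketch of the machinery the stubs above rely on; the real
# implementations live in the library's import utilities, and the bodies here
# are illustrative rather than exact:
#
#     class DummyObject(type):
#         # Metaclass so that even attribute access on the dummy *class*
#         # raises a helpful error when the backend is missing.
#         def __getattribute__(cls, key):
#             if key.startswith("_"):
#                 return super().__getattribute__(key)
#             requires_backends(cls, cls._backends)
#
#     def requires_backends(obj, backends):
#         name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
#         raise ImportError(f"{name} requires the following backends: {', '.join(backends)}")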
| 363
|
"""simple docstring"""
UpperCAmelCase: str = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
UpperCAmelCase: Any = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
UpperCAmelCase: int = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
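# These constants drive notebook generation for the docs: INSTALL_CONTENT is
# injected as the first code cell, and the placeholder mapping above likely
# exists so templated snippets such as "{model_class}" still parse as valid
# Python when the code formatter runs over them.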
| 336
| 0
|
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class a ( unittest.TestCase ):
    def tearDown( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@property
    def dummy_image( self ):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
        return image
@property
    def dummy_cond_unet( self ):
        torch.manual_seed(0 )
        model = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        return model
@property
    def dummy_vae( self ):
        torch.manual_seed(0 )
        model = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        return model
@property
    def dummy_text_encoder( self ):
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModel(config )
@property
    def dummy_extractor( self ):
        def extract(*args , **kwargs ):
            class Out:
                def __init__( self ):
                    self.pixel_values = torch.ones([0] )

                def to( self , device ):
                    self.pixel_values.to(device )
                    return self

            return Out()

        return extract
    def test_safe_diffusion_ddim( self ):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet , scheduler=scheduler , vae=vae , text_encoder=bert , tokenizer=tokenizer , safety_checker=None , feature_extractor=self.dummy_extractor , )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = """A painting of a squirrel eating a burger"""
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = sd_pipe(
            [prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_pndm( self ):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet , scheduler=scheduler , vae=vae , text_encoder=bert , tokenizer=tokenizer , safety_checker=None , feature_extractor=self.dummy_extractor , )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = """A painting of a squirrel eating a burger"""
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = sd_pipe(
            [prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_no_safety_checker( self ):
        pipe = StableDiffusionPipeline.from_pretrained(
            """hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=None )
        assert isinstance(pipe , StableDiffusionPipeline )
        assert isinstance(pipe.scheduler , LMSDiscreteScheduler )
        assert pipe.safety_checker is None
        image = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname )
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
        assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
    def test_stable_diffusion_fp16( self ):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet , scheduler=scheduler , vae=vae , text_encoder=bert , tokenizer=tokenizer , safety_checker=None , feature_extractor=self.dummy_extractor , )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = """A painting of a squirrel eating a burger"""
        image = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class a ( unittest.TestCase ):
    def tearDown( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_harm_safe_stable_diffusion( self ):
        sd_pipe = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=None )
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = (
            """portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
            """ coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
            """ anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
            """ children from bahnhof zoo, detailed """
        )
        seed = 4003660346
        guidance_scale = 7
        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=guidance_scale , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=guidance_scale , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_nudity_safe_stable_diffusion( self ):
        sd_pipe = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=None )
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = """padme amidala taking a bath artwork, safe for work, no nudity"""
        seed = 2734971755
        guidance_scale = 7
        generator = torch.manual_seed(seed )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=guidance_scale , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        generator = torch.manual_seed(seed )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=guidance_scale , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_nudity_safetychecker_safe_stable_diffusion( self ):
        sd_pipe = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = (
            """the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
            """ leyendecker"""
        )
        seed = 1044355234
        guidance_scale = 12
        generator = torch.manual_seed(seed )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=guidance_scale , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
        generator = torch.manual_seed(seed )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=guidance_scale , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
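# Usage sketch for the safe pipeline exercised above (illustrative: assumes a
# CUDA device and downloads runwayml/stable-diffusion-v1-5; the sld_* values
# mirror the "strong" configuration from the nightly tests):
#
#     pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#     pipe = pipe.to("cuda")
#     image = pipe(
#         "portrait photo of an astronaut",
#         sld_guidance_scale=2000,
#         sld_warmup_steps=7,
#         sld_threshold=0.025,
#         sld_momentum_scale=0.5,
#         sld_mom_beta=0.7,
#     ).images[0]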
| 173
|
"""simple docstring"""
def binary_or(a: int , b: int ) -> str:
    if a < 0 or b < 0:
        raise ValueError("""the value of both inputs must be positive""" )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int("""1""" in (char_a, char_b) ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
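# Hand-checked example: 25 is 0b11001 and 32 is 0b100000, so
# binary_or(25, 32) == "0b111001" (57 in decimal).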
if __name__ == "__main__":
import doctest
doctest.testmod()
| 173
| 1
|
"""simple docstring"""
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program , timeout , task_id , completion_id ):
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=unsafe_execute , args=(check_program, result, timeout) )
    p.start()
    p.join(timeout=timeout + 1 )
    if p.is_alive():
        p.kill()
    if not result:
        result.append('''timed out''' )
    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
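# Design note: the program under test runs in a separate process so that a
# crash or hang cannot take down the evaluator; the parent joins with a one
# second grace period on top of the timeout, kills survivors, and treats an
# empty result list as a timeout.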
def unsafe_execute(check_program , result , timeout ):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil
        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir
        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()
        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout ):
                    exec(check_program , exec_globals )
            result.append('''passed''' )
        except TimeoutException:
            result.append('''timed out''' )
        except BaseException as e:
            result.append(f'''failed: {e}''' )
        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds ):
    def signal_handler(signum , frame ):
        raise TimeoutException('''Timed out!''' )
    signal.setitimer(signal.ITIMER_REAL , seconds )
    signal.signal(signal.SIGALRM , signal_handler )
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL , 0 )
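# Usage sketch (assumes a Unix signal-capable main thread, which SIGALRM
# requires):
#
#     try:
#         with time_limit(2.0):
#             while True:  # would spin forever without the limit
#                 pass
#     except TimeoutException:
#         print("interrupted after 2 seconds")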
@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream ):
        with contextlib.redirect_stderr(stream ):
            with redirect_stdin(stream ):
                yield
@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname ):
            yield dirname
class TimeoutException(Exception ):
    pass
class WriteOnlyStringIO(io.StringIO ):
    """StringIO that throws an exception when it's read from."""

    def read( self , *args , **kwargs ):
        raise OSError

    def readline( self , *args , **kwargs ):
        raise OSError

    def readlines( self , *args , **kwargs ):
        raise OSError

    def readable( self , *args , **kwargs ):
        return False
class redirect_stdin(contextlib._RedirectStream ):  # type: ignore
    _stream = '''stdin'''
@contextlib.contextmanager
def chdir(root ):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root )
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd )
def reliability_guard(maximum_memory_bytes=None ):
    """
    Disables various destructive functions so the generated code cannot
    interfere with the test (fork bombs, killing processes, deleting files,
    and so on). This is a guard rail, not a security sandbox.
    """
    if maximum_memory_bytes is not None:
        import resource
        resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
        resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
    faulthandler.disable()
    import builtins
    builtins.exit = None
    builtins.quit = None
    import os
    os.environ['''OMP_NUM_THREADS'''] = '''1'''
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None
    import shutil
    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None
    import subprocess
    subprocess.Popen = None  # type: ignore
    __builtins__['''help'''] = None
    import sys
    sys.modules['''ipdb'''] = None
    sys.modules['''joblib'''] = None
    sys.modules['''resource'''] = None
    sys.modules['''psutil'''] = None
    sys.modules['''tkinter'''] = None
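# End-to-end sketch of the public entry point above; the task id and program
# are made up for illustration:
#
#     if __name__ == "__main__":
#         outcome = check_correctness("assert 1 + 1 == 2", timeout=3.0,
#                                     task_id="demo/0", completion_id=0)
#         # -> {"task_id": "demo/0", "passed": True, "result": "passed",
#         #     "completion_id": 0}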
| 369
|
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = '''src/transformers'''
# Matches is_xxx_available()
_re_backend = re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_re_try = re.compile(R'''^\s*try:''')
# Catches a line with else:
_re_else = re.compile(R'''^\s*else:''')
def find_backend(line ):
    if _re_test_backend.search(line ) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line )]
    backends.sort()
    return "_and_".join(backends )
def parse_init(init_file ):
    with open(init_file , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines ) and not lines[line_index].startswith('''_import_structure = {''' ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines ):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line ):
            content = _re_one_line_import_struct.search(line ).groups()[0]
            imports = re.findall(R'''\[([^\]]+)\]''' , content )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line )
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(obj ) > 0]
            objects.extend(imports )
        elif line.startswith(''' ''' * 8 + '''"''' ):
            objects.append(line[9:-3] )
        line_index += 1
    import_dict_objects = {'''none''': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line ) is not None:
                    objects.append(_re_import_struct_add_one.search(line ).groups()[0] )
                elif _re_import_struct_add_many.search(line ) is not None:
                    imports = _re_import_struct_add_many.search(line ).groups()[0].split(''', ''' )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_between_brackets.search(line ) is not None:
                    imports = _re_between_brackets.search(line ).groups()[0].split(''', ''' )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_quote_object.search(line ) is not None:
                    objects.append(_re_quote_object.search(line ).groups()[0] )
                elif line.startswith(''' ''' * 8 + '''"''' ):
                    objects.append(line[9:-3] )
                elif line.startswith(''' ''' * 12 + '''"''' ):
                    objects.append(line[13:-3] )
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith('''else''' )
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
        elif line.startswith(''' ''' * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    type_hint_objects = {'''none''': objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines ):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
                elif line.startswith(''' ''' * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects , type_hint_objects ):
    def find_duplicates(seq ):
        return [k for k, v in collections.Counter(seq ).items() if v > 1]
    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = '''base imports''' if key == '''none''' else f'''{key} backend'''
            errors.append(f'''Differences for {name}:''' )
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''' )
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' )
    return errors
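# Minimal example: identical sides yield no errors,
#     analyze_results({"none": ["A"]}, {"none": ["A"]}) == []
# while an object missing from TYPE_CHECKING is reported as
#     ["Differences for base imports:", " B in _import_structure but not in TYPE_HINT."]
# for analyze_results({"none": ["A", "B"]}, {"none": ["A"]}).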
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            fname = os.path.join(root , '''__init__.py''' )
            objects = parse_init(fname )
            if objects is not None:
                errors = analyze_results(*objects )
                if len(errors ) > 0:
                    errors[0] = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
                    failures.append('''\n'''.join(errors ) )
    if len(failures ) > 0:
        raise ValueError('''\n\n'''.join(failures ) )
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('''_''' ):
                directories.remove(folder )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob('''*.py''' ) ) ) == 0:
                continue
            short_path = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(os.path.sep , '''.''' )
            submodules.append(submodule )
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
            if len(submodule.split('''.''' ) ) == 1:
                submodules.append(submodule )
    return submodules
IGNORE_SUBMODULES = [
    '''convert_pytorch_checkpoint_to_tf2''',
    '''modeling_flax_pytorch_utils''',
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        '''transformers''' , os.path.join(PATH_TO_TRANSFORMERS , '''__init__.py''' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered ) > 0:
        list_of_modules = '''\n'''.join(f'''- {module}''' for module in module_not_registered )
        raise ValueError(
            '''The following submodules are not properly registered in the main init of Transformers:\n'''
            f'''{list_of_modules}\n'''
            '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 314
| 0
|
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def get_new_h_w(h , w , scale_factor=8 ):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
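# Worked example: with the default scale_factor=8 the output is padded up to a
# multiple of 64, so get_new_h_w(500, 500) returns (512, 512) while
# get_new_h_w(768, 768) returns (768, 768) unchanged.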
class KandinskyPipeline(DiffusionPipeline ):
"""simple docstring"""
    def __init__( self , text_encoder: MultilingualCLIP , tokenizer: XLMRobertaTokenizer , unet: UNet2DConditionModel , scheduler: Union[DDIMScheduler, DDPMScheduler] , movq: VQModel , ):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ):
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
    def _encode_prompt( self , prompt , device , num_images_per_prompt , do_classifier_free_guidance , negative_prompt=None , ):
        batch_size = len(prompt ) if isinstance(prompt , list ) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding="""max_length""" , truncation=True , max_length=77 , return_attention_mask=True , add_special_tokens=True , return_tensors="""pt""" , )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt , padding="""longest""" , return_tensors="""pt""" ).input_ids
        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids , untruncated_ids ):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
            logger.warning(
                """The following part of your input was truncated because CLIP can only handle sequences up to"""
                f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
        text_input_ids = text_input_ids.to(device )
        text_mask = text_inputs.attention_mask.to(device )
        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids , attention_mask=text_mask )
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt , dim=0 )
        text_mask = text_mask.repeat_interleave(num_images_per_prompt , dim=0 )
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""""""] * batch_size
            elif type(prompt ) is not type(negative_prompt ):
                raise TypeError(
                    f"""`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt )} !="""
                    f""" {type(prompt )}.""" )
            elif isinstance(negative_prompt , str ):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt ):
                raise ValueError(
                    f"""`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt )}, but `prompt`:"""
                    f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
                    """ the batch size of `prompt`.""" )
            else:
                uncond_tokens = negative_prompt
            uncond_input = self.tokenizer(
                uncond_tokens , padding="""max_length""" , max_length=77 , truncation=True , return_attention_mask=True , add_special_tokens=True , return_tensors="""pt""" , )
            uncond_text_input_ids = uncond_input.input_ids.to(device )
            uncond_text_mask = uncond_input.attention_mask.to(device )
            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids , attention_mask=uncond_text_mask )
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1 , num_images_per_prompt )
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt , seq_len )
            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1 , num_images_per_prompt , 1 )
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt , seq_len , -1 )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt , dim=0 )
            # done duplicates
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds] )
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
            text_mask = torch.cat([uncond_text_mask, text_mask] )
        return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""" )
        device = torch.device(f"""cuda:{gpu_id}""" )
        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    def enable_model_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
        device = torch.device(f"""cuda:{gpu_id}""" )
        if self.device.type != "cpu":
            self.to("""cpu""" , silence_dtype_warnings=True )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker , device , prev_module_hook=hook )
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
        if not hasattr(self.unet , """_hf_hook""" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module , """_hf_hook""" )
                and hasattr(module._hf_hook , """execution_device""" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
    def __call__( self , prompt , image_embeds , negative_image_embeds , negative_prompt = None , height = 5_1_2 , width = 5_1_2 , num_inference_steps = 1_0_0 , guidance_scale = 4.0 , num_images_per_prompt = 1 , generator = None , latents = None , output_type = "pil" , return_dict = True , ):
        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(prompt )}""" )
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds , text_encoder_hidden_states , _ = self._encode_prompt(
            prompt , device , num_images_per_prompt , do_classifier_free_guidance , negative_prompt )
        if isinstance(image_embeds , list ):
            image_embeds = torch.cat(image_embeds , dim=0 )
        if isinstance(negative_image_embeds , list ):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
                dtype=prompt_embeds.dtype , device=device )
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height , width = get_new_h_w(height , width , self.movq_scale_factor )
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , device , generator , latents , self.scheduler , )
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"""text_embeds""": prompt_embeds, """image_embeds""": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=text_encoder_hidden_states , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
            if do_classifier_free_guidance:
                noise_pred , variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                _ , variance_pred_text = variance_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , """variance_type""" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred , _ = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , ).prev_sample
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True )["""sample"""]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}""" )
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
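# (added) A minimal, hedged usage sketch for the text-to-image pipeline above,
# kept as a comment so this module stays import-safe. The class and checkpoint
# names are assumptions, not pinned down by this file:
#
#   pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")  # assumed repo id
#   out = pipe("a photo of a red fox", image_embeds=image_embeds,
#              negative_image_embeds=negative_image_embeds, num_inference_steps=100)
#   out.images[0].save("fox.png")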
| 124
|
from __future__ import annotations
import math
__version__ = '2020.9.26'
__author__ = 'xcodz-dot, cclaus, dhruvmanila'
def convert_to_ad(x ,y ,z ,scale ,distance ) -> tuple[float, float]:
    if not all(isinstance(val ,(float, int) ) for val in locals().values() ):
        msg = f"""Input values must either be float or int: {list(locals().values() )}"""
        raise TypeError(msg )
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
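# (added) Worked example for the perspective projection above: the point
# (x, y, z) = (1.0, 2.0, 3.0) with distance = scale = 10.0 maps to
# x' = (1 * 10) / (3 + 10) * 10 ≈ 7.6923 and y' ≈ 15.3846, which is what the
# print in the __main__ block below reports.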
def rotate(x ,y ,z ,axis ,angle ) -> tuple[float, float, float]:
    if not isinstance(axis ,str ):
        raise TypeError("""Axis must be a str""" )
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val ,(float, int) ) for val in input_variables.values() ):
        msg = (
            """Input values except axis must either be float or int: """
            f"""{list(input_variables.values() )}"""
        )
        raise TypeError(msg )
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle ) - y * math.sin(angle )
        new_y = y * math.cos(angle ) + x * math.sin(angle )
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + y * math.sin(angle )
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + x * math.sin(angle )
        new_y = y
    else:
        raise ValueError("""not a valid axis, choose one of 'x', 'y', 'z'""" )
    return new_x, new_y, new_z
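# (added) Note on rotate(): each branch applies a plane rotation in the two
# coordinates perpendicular to `axis`. The conversion
# `(angle % 360) / 450 * 180 / math.pi` is kept verbatim from the source; it is
# deliberately not the standard degrees-to-radians formula (math.radians).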
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(f"""{rotate(1.0, 2.0, 3.0, "y", 90.0) = }""")
| 124
| 1
|
"""simple docstring"""
import heapq
import sys
import numpy as np
snake_case = tuple[int, int]
class PriorityQueue:
'''simple docstring'''
def __init__( self : List[Any] ):
        self.elements = []
        self.set = set()
    def minkey( self ):
if not self.empty():
return self.elements[0][0]
else:
return float("inf" )
    def empty( self ):
return len(self.elements ) == 0
    def put( self , item , priority ):
        if item not in self.set:
            heapq.heappush(self.elements , (priority, item) )
            self.set.add(item )
else:
# update
# print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pri, x) )
                (pri, x) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
    def remove_element( self , item ):
        if item in self.set:
            self.set.remove(item )
            temp = []
            (pro, x) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pro, x) )
                (pro, x) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
    def top_show( self ):
return self.elements[0][1]
    def top_and_pop( self ):
        (priority, item) = heapq.heappop(self.elements )
        self.set.remove(item )
return (priority, item)
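# (added) Minimal sketch of how this queue is used below: `put` pushes or
# re-prioritises an item, `minkey` peeks at the smallest priority,
# `top_show` peeks at the matching item, and `top_and_pop` removes it.
#
#   q = PriorityQueue()
#   q.put((0, 0), 5)
#   q.put((0, 0), 2)   # same item, new priority -> heap is rebuilt
#   assert q.minkey() == 2 and q.top_show() == (0, 0)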
def consistent_heuristic(P , goal ):
    """simple docstring"""
    a = np.array(P )
    b = np.array(goal )
    return np.linalg.norm(a - b )
def heuristic_1(P , goal ):
    """simple docstring"""
    return consistent_heuristic(P , goal ) // t
def heuristic_2(P , goal ):
    """simple docstring"""
    return abs(P[0] - goal[0] ) + abs(P[1] - goal[1] )
def key(start , i , goal , g_function ):
    """simple docstring"""
    ans = g_function[start] + W1 * heuristics[i](start , goal )
    return ans
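# (added) The priority key(s, i) = g(s) + W1 * h_i(s, goal) is the weighted
# multi-heuristic A* rule: queue 0 is anchored by the consistent heuristic,
# while the inadmissible heuristics (1 and 2) are only trusted during expansion
# when their key stays within a factor W2 of the anchor's best key.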
def do_something(back_pointer , goal , start ):
    """simple docstring"""
    grid = np.chararray((n, n) )
    for i in range(n ):
        for j in range(n ):
            grid[i][j] = "*"
    for i in range(n ):
        for j in range(n ):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"
    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"
    for i in range(n ):
        for j in range(n ):
            if (i, j) == (0, n - 1):
                print(grid[i][j] , end=" " )
                print("<-- End position" , end=" " )
            else:
                print(grid[i][j] , end=" " )
        print()
    print("^" )
    print("Start position" )
    print()
    print("# is an obstacle" )
    print("- is the path taken by algorithm" )
    print("PATH TAKEN BY THE ALGORITHM IS:-" )
    x = back_pointer[goal]
    while x != start:
        print(x , end=" " )
        x = back_pointer[x]
    print(x )
    sys.exit()
def valid(p ):
"""simple docstring"""
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def expand_state(s , j , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , ):
    """simple docstring"""
    for itera in range(n_heuristic ):
        open_list[itera].remove_element(s )
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)
    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours ) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours )
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf" )
            if valid(neighbours ) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours , key(neighbours , 0 , goal , g_function ) )
                    if neighbours not in close_list_inad:
                        for var in range(1 , n_heuristic ):
                            if key(neighbours , var , goal , g_function ) <= W2 * key(
                                neighbours , 0 , goal , g_function ):
                                open_list[j].put(
                                    neighbours , key(neighbours , var , goal , g_function ) )
def make_common_ground():
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()
blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3 # one consistent and two other inconsistent
# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)
t = 1
def multi_a_star(start , goal , n_heuristic ):
    """simple docstring"""
    g_function = {start: 0, goal: float("inf" )}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()
    for i in range(n_heuristic ):
        open_list.append(PriorityQueue() )
        open_list[i].put(start , key(start , i , goal , g_function ) )
    close_list_anchor: list[int] = []
    close_list_inad: list[int] = []
    while open_list[0].minkey() < float("inf" ):
        for i in range(1 , n_heuristic ):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf" ):
                        do_something(back_pointer , goal , start )
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s )
                    expand_state(
                        get_s , i , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , )
                    close_list_inad.append(get_s )
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf" ):
                        do_something(back_pointer , goal , start )
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s )
                        expand_state(
                            get_s , 0 , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , )
                        close_list_anchor.append(get_s )
print("No path found to goal" )
print()
for i in range(n - 1 , -1 , -1 ):
        for j in range(n ):
if (j, i) in blocks:
print("#" , end=" " )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("*" , end=" " )
else:
print("-" , end=" " )
else:
print("*" , end=" " )
if (j, i) == (n - 1, n - 1):
print("<-- End position" , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 355
|
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset , expected_features ):
    """simple docstring"""
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_dataset_from_parquet_keep_in_memory(keep_in_memory , parquet_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_parquet_dataset(dataset , expected_features )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def test_dataset_from_parquet_features(features , parquet_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path , features=features , cache_dir=cache_dir ).read()
    _check_parquet_dataset(dataset , expected_features )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_dataset_from_parquet_split(split , parquet_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path , cache_dir=cache_dir , split=split ).read()
    _check_parquet_dataset(dataset , expected_features )
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def test_dataset_from_parquet_path_type(path_type , parquet_path , tmp_path ):
    """simple docstring"""
    if issubclass(path_type , str ):
        path = parquet_path
    elif issubclass(path_type , list ):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path , cache_dir=cache_dir ).read()
    _check_parquet_dataset(dataset , expected_features )
def _check_parquet_datasetdict(dataset_dict , expected_features , splits=("train",) ):
    """simple docstring"""
    assert isinstance(dataset_dict , DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory , parquet_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path} , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_parquet_datasetdict(dataset , expected_features )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def test_parquet_datasetdict_reader_features(features , parquet_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path} , features=features , cache_dir=cache_dir ).read()
    _check_parquet_datasetdict(dataset , expected_features )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_parquet_datasetdict_reader_split(split , parquet_path , tmp_path ):
    """simple docstring"""
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path , cache_dir=cache_dir ).read()
    _check_parquet_datasetdict(dataset , expected_features , splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
def test_parquet_write(dataset , tmp_path ):
    """simple docstring"""
    writer = ParquetDatasetWriter(dataset , tmp_path / "foo.parquet" )
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet" )
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir , tmp_path ):
    """simple docstring"""
    image_path = str(shared_datadir / "test_image_rgb.jpg" )
    data = {"image": [image_path]}
    features = Features({"image": Image()} )
    dataset = Dataset.from_dict(data , features=features )
    writer = ParquetDatasetWriter(dataset , tmp_path / "foo.parquet" )
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=True ).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def test_get_writer_batch_size(feature , expected ):
    """simple docstring"""
    assert get_writer_batch_size(feature ) == expected
| 319
| 0
|
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'sentencepiece_model_ckpt': 'sentencepiece.bpe.model'}
RESOURCE_FILES_NAMES = {
'sentencepiece_model_file': 'sentencepiece.bpe.model',
'vocab_file': 'vocab.txt',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt',
'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt',
},
'sentencepiece_model_file': {
'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model',
'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'ernie-m-base': 5_1_4,
'ernie-m-large': 5_1_4,
}
PRETRAINED_INIT_CONFIGURATION = {
'ernie-m-base': {'do_lower_case': False},
'ernie-m-large': {'do_lower_case': False},
}
class A ( PreTrainedTokenizer ):
    model_input_names = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
    def __init__(self , sentencepiece_model_ckpt , vocab_file=None , do_lower_case=False , encoding="utf8" , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , vocab_file=vocab_file , encoding=encoding , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(sentencepiece_model_ckpt )
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file )
        else:
            self.vocab = {self.sp_model.id_to_piece(id ): id for id in range(self.sp_model.get_piece_size() )}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self , text ):
        """simple docstring"""
        if text is None:
            return None
        split_tokens = self.tokenize(text )
        normalized_text , char_mapping = "", []
        for i, ch in enumerate(text ):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch )
            else:
                ch = unicodedata.normalize("NFKC" , ch )
            if self.is_whitespace(ch ):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch ) )
        text , token_mapping , offset = normalized_text, [], 0
        if self.do_lower_case:
            text = text.lower()
        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token ) + offset
            end = start + len(token )
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
            offset = end
        return token_mapping
@property
    def vocab_size(self ):
"""simple docstring"""
return len(self.vocab )
    def get_vocab(self ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
    def __getstate__(self ):
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self , d ):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.sentencepiece_model_ckpt )
    def clean_text(self , text ):
        """simple docstring"""
        return "".join((self.SP_CHAR_MAPPING.get(c , c ) for c in text) )
    def _tokenize(self , text , enable_sampling=False , nbest_size=6_4 , alpha=0.1 ):
        """simple docstring"""
        if self.sp_model_kwargs.get("enable_sampling" ) is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha" ) is not None:
            alpha = self.sp_model_kwargs.get("alpha" )
        if self.sp_model_kwargs.get("nbest_size" ) is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size" )
        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text )
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text , nbest_size , alpha )
        new_pieces = []
        for pi, piece in enumerate(pieces ):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE ) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE )
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece ):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk ) or self.is_punct(chunk ):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    new_pieces.append(chunk )
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    lst_i = i
            if len(piece ) > lst_i:
                new_pieces.append(piece[lst_i:] )
        return new_pieces
    def convert_tokens_to_string(self , tokens ):
        """simple docstring"""
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string
    def convert_ids_to_string(self , ids ):
        """simple docstring"""
        tokens = self.convert_ids_to_tokens(ids )
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string
    def _convert_token_to_id(self , token ):
        """simple docstring"""
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )
    def _convert_id_to_token(self , index ):
        """simple docstring"""
        return self.reverse_vocab.get(index , self.unk_token )
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep
    def build_offset_mapping_with_special_tokens(self , offset_mapping_0 , offset_mapping_1=None ):
        """simple docstring"""
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]
    def get_special_tokens_mask(self , token_ids_0 , token_ids_1=None , already_has_special_tokens=False ):
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]
    def create_token_type_ids_from_sequences(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0 ) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0 ) + 1) + [1] * (len(token_ids_1 ) + 3)
    def is_ch_char(self , char ):
        """simple docstring"""
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False
    def is_alpha(self , char ):
        """simple docstring"""
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False
    def is_punct(self , char ):
        """simple docstring"""
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False
    def is_whitespace(self , char ):
        """simple docstring"""
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char ) == 1:
            cat = unicodedata.category(char )
            if cat == "Zs":
                return True
        return False
    def load_vocab(self , filepath ):
        """simple docstring"""
        token_to_idx = {}
        with io.open(filepath , "r" , encoding="utf-8" ) as f:
            for index, line in enumerate(f ):
                token = line.rstrip("\n" )
                token_to_idx[token] = int(index )
        return token_to_idx
    def save_vocabulary(self , save_directory , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file , "w" , encoding="utf-8" ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv: kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        " Please check that the vocabulary is not corrupted!" )
                    index = token_index
                writer.write(token + "\n" )
                index += 1
        tokenizer_model_file = os.path.join(save_directory , "sentencepiece.bpe.model" )
        with open(tokenizer_model_file , "wb" ) as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model )
        return (vocab_file,)
| 65
|
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'tokenizer_config_file': 'tokenizer_config.json',
'merges_file': 'merges.txt',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
),
},
'tokenizer_config_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
),
},
'merges_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
),
},
}
BPE_TOKEN_MERGES = '</w>'
BPE_TOKEN_VOCAB = '@@ '
def get_pairs(word ) -> set:
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
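# (added) Example of the helper above: get_pairs(("h", "e", "l", "l", "o</w>"))
# returns {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o</w>")} — the set of
# adjacent symbol bigrams that the BPE loop considers for merging.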
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/s2t-wav2vec2-large-en-de': 1_0_2_4}
class A ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self , vocab_file , bos_token="<s>" , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , do_lower_case=False , merges_file=None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , do_lower_case=do_lower_case , **kwargs , )
        self.do_lower_case = do_lower_case
        with open(vocab_file , encoding="utf-8" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            logger.info(f"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file , encoding="utf-8" ) as merges_handle:
                merges = merges_handle.read().split("\n" )[:-1]
            merges = [tuple(merge.split()[:2] ) for merge in merges]
            self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
            self.cache = {}
@property
    def vocab_size(self ) -> int:
        """simple docstring"""
        return len(self.decoder )
    def get_vocab(self ) -> Dict:
        """simple docstring"""
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe(self , token ):
        """simple docstring"""
        word = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair: self.bpe_ranks.get(pair , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = " ".join(word )
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES ):
            word = word.replace(BPE_TOKEN_MERGES , "" )
        word = word.replace(" " , BPE_TOKEN_VOCAB )
        self.cache[token] = word
        return word
    def _tokenize(self , text ):
        """simple docstring"""
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding." )
        if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token ).split(" " ) ) )
        return split_tokens
    def _convert_token_to_id(self , token ) -> int:
        """simple docstring"""
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token(self , index ) -> str:
        """simple docstring"""
        result = self.decoder.get(index , self.unk_token )
        return result
    def convert_tokens_to_string(self , tokens ) -> str:
        """simple docstring"""
        string = " ".join(tokens )
        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB ) )
        return string
    def save_vocabulary(self , save_directory , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merges_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file , "w" , encoding="utf-8" ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1
        return (vocab_file, merges_file)
| 65
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 326
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _a ( TokenizerTesterMixin , unittest.TestCase):
"""simple docstring"""
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp( self ):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return CTRLTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = '''adapt react readapt apt'''
        output_text = '''adapt react readapt apt'''
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = '''adapt react readapt apt'''
        bpe_tokens = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
| 326
| 1
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 6_50, """eval_accuracy""": 0.6, """eval_loss""": 0.9},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 6_00, """eval_accuracy""": 0.3, """eval_loss""": 0.9},
},
] )
class _lowercase (unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        '''simple docstring'''
        if self.framework == "pytorch":
            subprocess.run(
                F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=True , )
        assert hasattr(self , "env" )
    def create_estimator( self , instance_count=1 ):
        '''simple docstring'''
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="py36" , )
    def save_results_as_csv( self , job_name ):
        '''simple docstring'''
        TrainingJobAnalytics(job_name ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
    def test_glue( self ):
'''simple docstring'''
UpperCamelCase_ = self.create_estimator()
# run training
estimator.fit()
# result dataframe
UpperCamelCase_ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCamelCase_ = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
UpperCamelCase_ = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCamelCase_ = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , snake_case__ )
| 128
|
def rank_of_matrix(matrix):
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows , columns)
    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1 , rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row , columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1 , rows):
                if matrix[i][row] != 0:
                    matrix[row] , matrix[i] = matrix[i] , matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank
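# (added) A hedged, hand-checkable example for rank_of_matrix: the second row
# below is twice the first, so only two rows are linearly independent.
#
#   assert rank_of_matrix([[1.0, 2.0, 3.0], [2.0, 4.0, 6.0], [0.0, 1.0, 1.0]]) == 2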
if __name__ == "__main__":
import doctest
doctest.testmod()
| 128
| 1
|
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
UpperCAmelCase : str = logging.getLogger(__name__)
def load_and_quantize_model( model , bnb_quantization_config , weights_location = None , device_map = None , no_split_module_classes = None , max_memory = None , offload_folder = None , offload_state_dict = False , ):
"""simple docstring"""
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
            """ make sure you have the latest version of `bitsandbytes` installed.""" )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
            """make sure you have the latest version of `bitsandbytes` installed.""" )
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map , dict ) and len(device_map.keys() ) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model )
# add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu )
    modules_to_not_convert = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules )
# compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
        model = replace_with_bnb_layers(model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
# convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules ):
                param.to(torch.float32 )
                if param.dtype != torch.float32:
                    name = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
                    param = getattr(model , name , None )
                    if param is not None:
                        param.to(torch.float32 )
            elif torch.is_floating_point(param ):
                param.to(dtype )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
F'The model device type is {model_device.type}. However, cuda is needed for quantization.'
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
F'`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ' )
else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
        device_map = get_quantized_model_device_map(
            model , bnb_quantization_config , device_map , max_memory=max_memory , no_split_module_classes=no_split_module_classes , )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
        load_checkpoint_in_model(
            model , weights_location , device_map , dtype=bnb_quantization_config.torch_dtype , offload_folder=offload_folder , offload_state_dict=offload_state_dict , keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules , offload_8bit_bnb=load_in_8bit and offload , )
    return dispatch_model(model , device_map=device_map , offload_dir=offload_folder )
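# (added) Rough shape of the function above, for orientation: an already
# materialised model is quantised in place (replace_with_bnb_layers, then moved
# to CUDA), while a meta-device model is first rebuilt with bnb layers under
# init_empty_weights, filled from `weights_location` via load_checkpoint_in_model,
# and finally dispatched across the inferred device_map.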
def get_quantized_model_device_map( model , bnb_quantization_config , device_map=None , max_memory=None , no_split_module_classes=None ):
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
__SCREAMING_SNAKE_CASE = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
    if isinstance(device_map , str ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
        special_dtypes = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
            max_memory = get_balanced_memory(
                model , low_zero=(device_map == """balanced_low_0""") , max_memory=max_memory , **kwargs , )
        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model , **kwargs )
    if isinstance(device_map , dict ):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None ):
"""simple docstring"""
if modules_to_not_convert is None:
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = _replace_with_bnb_layers(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def _replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None , ):
"""simple docstring"""
    has_been_replaced = False
for name, module in model.named_children():
if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )
        if isinstance(module , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
__SCREAMING_SNAKE_CASE = """.""".join(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
                    proceed = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=False , threshold=bnb_quantization_config.llm_int8_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
                else:
                    raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False )
                setattr(model , name , bnb_module )
                has_been_replaced = True
        if len(list(module.children() ) ) > 0:
            _ , _has_been_replaced = _replace_with_bnb_layers(
                module , bnb_quantization_config , modules_to_not_convert , current_key_name )
            has_been_replaced = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def get_keys_to_not_convert( model ):
    """simple docstring"""
    with init_empty_weights():
        tied_model = deepcopy(model ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params , dict ):
        tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params , [] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model , """base_model_prefix""" ):
        is_base_model = not hasattr(model , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = [""".weight""", """.bias"""]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove , """""" )
        filtered_module_names.append(name )
    return filtered_module_names
def has_4bit_bnb_layers( model ):
    """simple docstring"""
    for m in model.modules():
        if isinstance(m , bnb.nn.Linear4bit ):
            return True
    return False
def get_parameter_device( parameter ):
    """simple docstring"""
    return next(parameter.parameters() ).device
def quantize_and_offload_8bit( model , param , param_name , new_dtype , offload_folder , offload_index , fp16_statistics ):
    """simple docstring"""
    if fp16_statistics is None:
        set_module_tensor_to_device(model , param_name , 0 , dtype=new_dtype , value=param )
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(""".""" )
            for split in splits[:-1]:
                new_module = getattr(module , split )
                if new_module is None:
                    raise ValueError(F'{module} has no attribute {split}.' )
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name] , param_name , offload_folder , index=offload_index )
        if hasattr(module._parameters[tensor_name] , """SCB""" ):
            offload_weight(
                module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , offload_folder , index=offload_index , )
    else:
        offload_weight(param , param_name , offload_folder , index=offload_index )
        offload_weight(fp16_statistics , param_name.replace("""weight""" , """SCB""" ) , offload_folder , index=offload_index )
    set_module_tensor_to_device(model , param_name , """meta""" , dtype=new_dtype , value=torch.empty(*param.size() ) )
| 360
|
"""Solve a quadratic equation ax^2 + bx + c = 0 with the quadratic formula."""
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """Return both roots; purely real roots are returned as plain floats."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
def main() -> None:
    solution1, solution2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution1} and {solution2}")
if __name__ == "__main__":
    main()
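# Worked example: for x^2 + 4 = 0 the discriminant is negative, so `cmath.sqrt`
# produces complex roots and they are returned unchanged:
#
#     >>> quadratic_roots(a=1, b=0, c=4)
#     (2j, -2j)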
| 331
| 0
|
"""CamemBERT configuration"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
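# Hedged usage sketch: instantiate a default configuration and inspect the
# dynamic ONNX input axes (no pretrained weights are required):
#
#     config = CamembertConfig()
#     print(config.model_type, config.vocab_size)   # camembert 30522
#     onnx_config = CamembertOnnxConfig(config)
#     print(dict(onnx_config.inputs))
#     # {'input_ids': {0: 'batch', 1: 'sequence'}, 'attention_mask': {0: 'batch', 1: 'sequence'}}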
| 297
|
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    """Pipeline that generates segmentation masks for a whole image with a SAM-style model."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")
        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # forward args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        # postprocess args
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to return batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )
        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()
        model_outputs = self.model(**model_inputs)
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )
        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)
        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 169
| 0
|
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
"word_embeddings_layernorm.weight",
"word_embeddings_layernorm.bias",
"input_layernorm.weight",
"input_layernorm.bias",
"post_attention_layernorm.weight",
"post_attention_layernorm.bias",
"self_attention.dense.bias",
"mlp.dense_4h_to_h.bias",
"ln_f.weight",
"ln_f.bias",
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
"mlp.dense_4h_to_h.weight",
"self_attention.dense.weight",
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP weight names to the transformers naming scheme."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks: the layer index is encoded in the file name,
    # offset by the three non-transformer layers that precede the blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
def get_dtype_size(dtype):
    """Return the size in bytes of one element of the given torch dtype."""
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
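# Quick sanity check: torch dtype names end with their bit width, which the
# regular expression above extracts:
#
#     >>> get_dtype_size(torch.float16)
#     2
#     >>> get_dtype_size(torch.int8)
#     1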
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    """Convert a Megatron-LM BLOOM checkpoint (split across TP ranks) into a transformers checkpoint."""
    # Construct the configuration
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)
    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))
        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0
        missing_keys = None
        config = BloomConfig()
        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")
                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)
                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)
            # Divide by the number of TP ranks the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )
            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )
        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))
        missing_keys = None
        for file in file_names:
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")
                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)
                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)
            # Divide by the number of TP ranks the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))
        assert not missing_keys, f"The keys {missing_keys} are missing"
        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bloom_checkpoint_path",
default=None,
type=str,
required=True,
help="Path to the Megatron-LM checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--bloom_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--shard_model",
action="store_true",
help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
)
parser.add_argument(
"--pretraining_tp",
default=4,
type=int,
help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
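# Example invocation (paths are placeholders and the script name is assumed from
# the transformers repository; --shard_model writes an indexed, sharded
# checkpoint instead of a single pytorch_model.bin):
#
#     python convert_bloom_original_checkpoint_to_pytorch.py \
#         --bloom_checkpoint_path /path/to/megatron_checkpoint \
#         --pytorch_dump_folder_path /path/to/output \
#         --pretraining_tp 4 \
#         --shard_model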
| 137
|
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]
        self.task = defaultdict(list)  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1
    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if this case has already been considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # Number of ways when we don't use this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)
        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)
        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]
    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)
        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
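# Worked example: with the inputs above the script prints 10. Person 2 can only
# take task 3 or 4; fixing either choice leaves 5 valid (task-distinct) picks
# for persons 0 and 1, hence 2 * 5 = 10 assignments in total.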
| 137
| 1
|