| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
def longest_distance(graph):
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1

            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1

            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
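A quick sanity check, added here for illustration: in the sample DAG the longest path is 0 -> 2 -> 5 -> 6 -> 7, which visits five vertices, so the call above prints 5.

# >>> longest_distance(graph)
# 5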
from __future__ import annotations

from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(
    source: str, destination: str, graph_forward: dict, graph_backward: dict
) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
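A hedged usage sketch for the search above (graph names as defined in this snippet): the cheapest E-to-F route is E -> G -> F with cost 3, versus cost 4 via B, C, D.

# >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
# 3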
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
_lowercase : Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(
_lowerCAmelCase , r"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , )
class _UpperCAmelCase ( _lowerCAmelCase ):
def a ( self : List[Any] , _lowercase : GenericTensor ):
if self.framework == "tf":
__UpperCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
__UpperCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_lowercase )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def a ( self : List[str] , _lowercase : GenericTensor ):
__UpperCAmelCase = self.get_masked_index(_lowercase )
__UpperCAmelCase = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F'''No mask_token ({self.tokenizer.mask_token}) found on the input''' , )
def a ( self : Optional[int] , _lowercase : GenericTensor ):
if isinstance(_lowercase , _lowercase ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_lowercase )
def a ( self : List[str] , _lowercase : Optional[int] , _lowercase : Tuple=None , **_lowercase : Tuple ):
if return_tensors is None:
__UpperCAmelCase = self.framework
__UpperCAmelCase = self.tokenizer(_lowercase , return_tensors=_lowercase )
self.ensure_exactly_one_mask_token(_lowercase )
return model_inputs
def a ( self : Optional[int] , _lowercase : Tuple ):
__UpperCAmelCase = self.model(**_lowercase )
__UpperCAmelCase = model_inputs['''input_ids''']
return model_outputs
def a ( self : Optional[int] , _lowercase : List[str] , _lowercase : Optional[Any]=5 , _lowercase : Dict=None ):
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
__UpperCAmelCase = target_ids.shape[0]
__UpperCAmelCase = model_outputs['''input_ids'''][0]
__UpperCAmelCase = model_outputs['''logits''']
if self.framework == "tf":
__UpperCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
__UpperCAmelCase = outputs.numpy()
__UpperCAmelCase = outputs[0, masked_index, :]
__UpperCAmelCase = stable_softmax(_lowercase , axis=-1 )
if target_ids is not None:
__UpperCAmelCase = tf.gather_nd(tf.squeeze(_lowercase , 0 ) , target_ids.reshape(-1 , 1 ) )
__UpperCAmelCase = tf.expand_dims(_lowercase , 0 )
__UpperCAmelCase = tf.math.top_k(_lowercase , k=_lowercase )
__UpperCAmelCase , __UpperCAmelCase = topk.values.numpy(), topk.indices.numpy()
else:
__UpperCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_lowercase ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
__UpperCAmelCase = outputs[0, masked_index, :]
__UpperCAmelCase = logits.softmax(dim=-1 )
if target_ids is not None:
__UpperCAmelCase = probs[..., target_ids]
__UpperCAmelCase , __UpperCAmelCase = probs.topk(_lowercase )
__UpperCAmelCase = []
__UpperCAmelCase = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
__UpperCAmelCase = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
__UpperCAmelCase = input_ids.numpy().copy()
if target_ids is not None:
__UpperCAmelCase = target_ids[p].tolist()
__UpperCAmelCase = p
# Filter padding out:
__UpperCAmelCase = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
__UpperCAmelCase = self.tokenizer.decode(_lowercase , skip_special_tokens=_lowercase )
__UpperCAmelCase = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(_lowercase )
result.append(_lowercase )
if single_mask:
return result[0]
return result
def a ( self : str , _lowercase : List[Any] , _lowercase : List[Any]=None ):
if isinstance(_lowercase , _lowercase ):
__UpperCAmelCase = [targets]
try:
__UpperCAmelCase = self.tokenizer.get_vocab()
except Exception:
__UpperCAmelCase = {}
__UpperCAmelCase = []
for target in targets:
__UpperCAmelCase = vocab.get(_lowercase , _lowercase )
if id_ is None:
__UpperCAmelCase = self.tokenizer(
_lowercase , add_special_tokens=_lowercase , return_attention_mask=_lowercase , return_token_type_ids=_lowercase , max_length=1 , truncation=_lowercase , )['''input_ids''']
if len(_lowercase ) == 0:
logger.warning(
F'''The specified target token `{target}` does not exist in the model vocabulary. '''
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
__UpperCAmelCase = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F'''The specified target token `{target}` does not exist in the model vocabulary. '''
F'''Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.''' )
target_ids.append(id_ )
__UpperCAmelCase = list(set(_lowercase ) )
if len(_lowercase ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
__UpperCAmelCase = np.array(_lowercase )
return target_ids
def a ( self : int , _lowercase : Dict=None , _lowercase : Optional[Any]=None ):
__UpperCAmelCase = {}
if targets is not None:
__UpperCAmelCase = self.get_target_ids(_lowercase , _lowercase )
__UpperCAmelCase = target_ids
if top_k is not None:
__UpperCAmelCase = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self : Union[str, Any] , _lowercase : Optional[Any] , *_lowercase : Union[str, Any] , **_lowercase : int ):
__UpperCAmelCase = super().__call__(_lowercase , **_lowercase )
if isinstance(_lowercase , _lowercase ) and len(_lowercase ) == 1:
return outputs[0]
return outputs
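A hedged usage sketch for the pipeline above; the checkpoint name is illustrative, and any fill-mask model would do.

from transformers import pipeline

unmasker = pipeline("fill-mask", model="distilroberta-base")
print(unmasker("Paris is the <mask> of France.", top_k=2))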
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase ( metaclass=_lowerCAmelCase ):
a__ : Union[str, Any] = ["onnx"]
def __init__( self : Any , *_lowercase : Dict , **_lowercase : Any ):
requires_backends(self , ['''onnx'''] )
@classmethod
def a ( cls : str , *_lowercase : List[Any] , **_lowercase : int ):
requires_backends(cls , ['''onnx'''] )
@classmethod
def a ( cls : Union[str, Any] , *_lowercase : List[str] , **_lowercase : Optional[int] ):
requires_backends(cls , ['''onnx'''] )
import math


def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first n natural numbers."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
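A small worked example, added for clarity: for n = 10 the sum of squares is 385 and the square of the sum is 55^2 = 3025, so the difference is 2640.

# >>> solution(10)
# 2640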
from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
import argparse

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """Copy/paste/tweak the roberta_prelayernorm checkpoint weights to our structure."""
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path to the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
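A hedged invocation sketch; the script filename is an assumption, while the example repo id comes from the help text above.

# python convert_roberta_prelayernorm_checkpoint.py \
#     --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#     --pytorch_dump_folder_path ./roberta-prelayernorm-dump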
from collections import OrderedDict
from typing import List, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
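A minimal construction sketch using the class defined directly above (no extra imports assumed):

config = EfficientNetConfig(image_size=600, hidden_act="swish")
print(config.model_type)  # "efficientnet"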
from functools import reduce
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits of n."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
from .imports import is_tqdm_available


if is_tqdm_available():
    from tqdm.auto import tqdm as _tqdm

from ..state import PartialState


def tqdm(main_process_only: bool = True, *args, **kwargs):
    """Wrapper around `tqdm.tqdm` that optionally displays only on the main process."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
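A hedged usage sketch; the import path is assumed and may differ across accelerate versions.

# from accelerate.utils import tqdm
#
# for _ in tqdm(True, range(100), desc="steps"):
#     pass  # progress bar renders only on the main process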
from itertools import count


def solution(min_block_length: int = 50) -> int:
    """Return the least row length n for which the fill-count function first
    exceeds one million (Project Euler 115)."""
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n


if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = DebertaTokenizer
a__ = True
a__ = DebertaTokenizerFast
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a__: List[str] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'[UNK]',
]
a__: List[str] = dict(zip(lowercase , range(len(lowercase))))
a__: Any = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
a__: Optional[Any] = {'unk_token': '[UNK]'}
a__: Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
a__: Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(lowercase) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(lowercase))
def lowerCamelCase_ ( self , **lowercase) -> int:
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase)
def lowerCamelCase_ ( self , lowercase) -> Dict:
'''simple docstring'''
a__: Tuple = 'lower newer'
a__: int = 'lower newer'
return input_text, output_text
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: Optional[int] = self.get_tokenizer()
a__: List[Any] = 'lower newer'
a__: Optional[int] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
a__: Optional[Any] = tokenizer.tokenize(lowercase)
self.assertListEqual(lowercase , lowercase)
a__: int = tokens + [tokenizer.unk_token]
a__: Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase) , lowercase)
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: int = self.get_tokenizer()
a__: Any = tokenizer('Hello' , 'World')
a__: Optional[Any] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['token_type_ids'] , lowercase)
@slow
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Dict = self.tokenizer_class.from_pretrained('microsoft/deberta-base')
a__: Union[str, Any] = tokenizer.encode('sequence builders' , add_special_tokens=lowercase)
a__: Optional[Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=lowercase)
a__: List[str] = tokenizer.encode(
'sequence builders' , add_special_tokens=lowercase , add_prefix_space=lowercase)
a__: Any = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=lowercase , add_prefix_space=lowercase)
a__: Optional[int] = tokenizer.build_inputs_with_special_tokens(lowercase)
a__: Union[str, Any] = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase)
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: List[Any] = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class)
for tokenizer_class in tokenizer_classes:
a__: int = tokenizer_class.from_pretrained('microsoft/deberta-base')
a__: List[str] = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
a__: Dict = tokenizer(lowercase , padding=lowercase)
a__: Union[str, Any] = [tokenizer.decode(lowercase , skip_special_tokens=lowercase) for seq in encoding['input_ids']]
# fmt: off
a__: Any = {
'input_ids': [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
def binary_or(a: int, b: int) -> str:
    """Return the binary string that results from a bitwise OR of two
    non-negative integers."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
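A couple of spot checks, added here and consistent with the function above:

# >>> binary_or(25, 32)
# '0b111001'
# >>> binary_or(37, 50)
# '0b110111'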
from __future__ import annotations

from itertools import permutations
from random import randint
from timeit import repeat


def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1_000, 1_000) for i in range(10)]
    r = randint(-5_000, 5_000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Naive approach: try every permutation of three elements."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Optimized approach: sort the array and sweep with two pointers."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10_000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10_000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowercase = logging.get_logger(__name__)
class _A ( _a ):
"""simple docstring"""
UpperCAmelCase : Optional[Any] = ["""pixel_values"""]
def __init__( self : List[str] , __UpperCAmelCase : bool = True , __UpperCAmelCase : int = 32 , __UpperCAmelCase : List[str]=PILImageResampling.BILINEAR , __UpperCAmelCase : bool = True , **__UpperCAmelCase : Tuple , ):
a : List[Any] = do_resize
a : str = do_rescale
a : Optional[Any] = size_divisor
a : Any = resample
super().__init__(**__UpperCAmelCase)
def __snake_case ( self : List[str] , __UpperCAmelCase : np.ndarray , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[ChannelDimension] = None , **__UpperCAmelCase : List[str]):
a : Optional[int] = get_image_size(__UpperCAmelCase)
# Rounds the height and width down to the closest multiple of size_divisor
a : str = height // size_divisor * size_divisor
a : int = width // size_divisor * size_divisor
a : str = resize(__UpperCAmelCase , (new_h, new_w) , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase)
return image
def __snake_case ( self : str , __UpperCAmelCase : np.ndarray , __UpperCAmelCase : float , __UpperCAmelCase : Optional[ChannelDimension] = None , **__UpperCAmelCase : str):
return rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase)
def __snake_case ( self : Optional[Any] , __UpperCAmelCase : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[Union[TensorType, str]] = None , __UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **__UpperCAmelCase : List[str] , ):
a : Dict = do_resize if do_resize is not None else self.do_resize
a : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
a : List[str] = size_divisor if size_divisor is not None else self.size_divisor
a : Tuple = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError("size_divisor is required for resizing")
a : Tuple = make_list_of_images(__UpperCAmelCase)
if not valid_images(__UpperCAmelCase):
raise ValueError("Invalid image(s)")
# All transformations expect numpy arrays.
a : Union[str, Any] = [to_numpy_array(__UpperCAmelCase) for img in images]
if do_resize:
a : List[Any] = [self.resize(__UpperCAmelCase , size_divisor=__UpperCAmelCase , resample=__UpperCAmelCase) for image in images]
if do_rescale:
a : Dict = [self.rescale(__UpperCAmelCase , scale=1 / 255) for image in images]
a : List[str] = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase) for image in images]
a : Dict = {"pixel_values": images}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase)
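A hedged usage sketch for the processor above; the image dimensions are chosen so both sides are already multiples of 32.

# processor = GLPNImageProcessor(size_divisor=32)
# inputs = processor(images=PIL.Image.new("RGB", (640, 480)), return_tensors="np")
# print(inputs["pixel_values"].shape)  # expected: (1, 3, 480, 640)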
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
__lowercase = True
except ImportError:
__lowercase = False
try:
from torch.hub import _get_torch_home
__lowercase = _get_torch_home()
except ImportError:
__lowercase = os.path.expanduser(
os.getenv("""TORCH_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """torch"""))
)
__lowercase = os.path.join(torch_cache_home, """transformers""")
__lowercase = """https://cdn.huggingface.co"""
__lowercase = """https://s3.amazonaws.com/models.huggingface.co/bert"""
__lowercase = """/""".join(str(Path(__file__).resolve()).split("""/""")[:-1])
__lowercase = os.path.join(PATH, """config.yaml""")
__lowercase = os.path.join(PATH, """attributes.txt""")
__lowercase = os.path.join(PATH, """objects.txt""")
__lowercase = os.getenv("""PYTORCH_PRETRAINED_BERT_CACHE""", default_cache_path)
__lowercase = os.getenv("""PYTORCH_TRANSFORMERS_CACHE""", PYTORCH_PRETRAINED_BERT_CACHE)
__lowercase = os.getenv("""TRANSFORMERS_CACHE""", PYTORCH_TRANSFORMERS_CACHE)
__lowercase = """pytorch_model.bin"""
__lowercase = """config.yaml"""
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs


def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)

        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level-1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
            self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError

            config_file = Config.load_yaml(resolved_config_file)

        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False])/len(n1.flatten())*100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")


# Hugging face functions below


def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str:
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"


def http_get(
    url,
    temp_file,
    proxies=None,
    resume_size=0,
    user_agent=None,
):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )

            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path


def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()

    if url.endswith(".h5"):
        filename += ".h5"

    return filename
def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))

        return output_path_extracted

    return output_path


def get_data(query, delim=","):
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
            try:
                data = eval(data)
            except Exception:
                data = data.split("\n")
        req.close()
    return data
def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img


def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new


def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")


def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str), im
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img


def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
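A hedged usage sketch for the helpers above; the URL is illustrative.

# img = img_tensorize("https://example.com/demo.jpg")
# for batch in chunk([img, img, img], batch=2):
#     print(len(batch))  # 2, then 1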
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
lowerCamelCase__ = logging.getLogger(__name__)
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
# save results
if os.path.exists(_UpperCamelCase ):
if os.path.exists(os.path.join(_UpperCamelCase , 'config.json' ) ) and os.path.isfile(
os.path.join(_UpperCamelCase , 'config.json' ) ):
os.remove(os.path.join(_UpperCamelCase , 'config.json' ) )
if os.path.exists(os.path.join(_UpperCamelCase , 'pytorch_model.bin' ) ) and os.path.isfile(
os.path.join(_UpperCamelCase , 'pytorch_model.bin' ) ):
os.remove(os.path.join(_UpperCamelCase , 'pytorch_model.bin' ) )
else:
os.makedirs(_UpperCamelCase )
model.save_pretrained(_UpperCamelCase )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase=False ):
__lowerCAmelCase : Optional[Any] = 2
if unlogit:
__lowerCAmelCase : str = torch.pow(_UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase : Tuple = p * torch.log(_UpperCamelCase )
__lowerCAmelCase : Any = 0
return -plogp.sum(dim=-1 )
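A quick sanity check, added here: the entropy of a uniform two-way distribution is ln 2.

# >>> entropy(torch.tensor([0.5, 0.5]))
# tensor(0.6931)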
def print_2d_tensor(tensor):
    """Print a 2D tensor."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))


def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and head importance scores according to
    http://arxiv.org/abs/1905.10650
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Mask heads (set some heads to zero) based on the head importance scores,
    as described in Michel et al. (http://arxiv.org/abs/1905.10650)
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune heads (remove head weights) based on the head importance scores,
    as described in Michel et al. (http://arxiv.org/abs/1905.10650)
    """
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )

    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--data_dir', default=None, type=str, required=True,
        help='The input data dir. Should contain the .tsv files (or other data files) for the task.',
    )
    parser.add_argument(
        '--model_name_or_path', default=None, type=str, required=True,
        help='Path to pretrained model or model identifier from huggingface.co/models',
    )
    parser.add_argument(
        '--output_dir', default=None, type=str, required=True,
        help='The output directory where the model predictions and checkpoints will be written.',
    )
    # Other parameters
    parser.add_argument(
        '--config_name', default='', type=str,
        help='Pretrained config name or path if not the same as model_name_or_path',
    )
    parser.add_argument(
        '--tokenizer_name', default='', type=str,
        help='Pretrained tokenizer name or path if not the same as model_name_or_path',
    )
    parser.add_argument(
        '--cache_dir', default=None, type=str,
        help='Where do you want to store the pre-trained models downloaded from s3',
    )
    parser.add_argument(
        '--data_subset', type=int, default=-1, help='If > 0: limit the data to a subset of data_subset instances.'
    )
    parser.add_argument(
        '--overwrite_output_dir', action='store_true', help='Whether to overwrite data in output directory'
    )
    parser.add_argument(
        '--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets'
    )
    parser.add_argument(
        '--dont_normalize_importance_by_layer', action='store_true', help='Don\'t normalize importance score by layers'
    )
    parser.add_argument(
        '--dont_normalize_global_importance', action='store_true',
        help='Don\'t normalize all importance scores between 0 and 1',
    )
    parser.add_argument(
        '--try_masking', action='store_true', help='Whether to try to mask head until a threshold of accuracy.'
    )
    parser.add_argument(
        '--masking_threshold', default=0.9, type=float,
        help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).',
    )
    parser.add_argument(
        '--masking_amount', default=0.1, type=float, help='Amount of heads to mask at each masking step.'
    )
    parser.add_argument('--metric_name', default='acc', type=str, help='Metric to use for head masking.')
    parser.add_argument(
        '--max_seq_length', default=128, type=int,
        help=(
            'The maximum total input sequence length after WordPiece tokenization. \n'
            'Sequences longer than this will be truncated, sequences shorter padded.'
        ),
    )
    parser.add_argument('--batch_size', default=1, type=int, help='Batch size.')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--local_rank', type=int, default=-1, help='local_rank for distributed training on gpus')
    parser.add_argument('--no_cuda', action='store_true', help='Whether not to use CUDA when available')
    parser.add_argument('--server_ip', type=str, default='', help='Can be used for distant debugging.')
    parser.add_argument('--server_port', type=str, default='', help='Can be used for distant debugging.')
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('Waiting for debugger attach')
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu')
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device('cuda', args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend='nccl')  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, 'run_args.bin'))
    logger.info('Training/evaluation parameters %s', args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
| 86
|
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    return vector * sigmoid(vector)
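# Editor-added check (not in the original file): a quick elementwise sanity test of
# the two activations above; expected values are rounded to 4 decimals.
def _demo_activations() -> None:
    x = np.array([-1.0, 0.0, 1.0])
    assert np.allclose(sigmoid(x), [0.2689, 0.5, 0.7311], atol=1e-4)
    assert np.allclose(sigmoid_linear_unit(x), [-0.2689, 0.0, 0.7311], atol=1e-4)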
if __name__ == "__main__":
import doctest
doctest.testmod()
| 86
| 1
|
'''simple docstring'''
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def __A ( lowerCAmelCase_ = "isbn/0140328726" ):
_UpperCAmelCase : int = olid.strip().strip("""/""" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("""/""" ) != 1:
_UpperCAmelCase : Union[str, Any] = f"{olid} is not a valid Open Library olid"
raise ValueError(lowerCAmelCase_ )
return requests.get(f"https://openlibrary.org/{new_olid}.json" ).json()
def __A ( lowerCAmelCase_ ):
_UpperCAmelCase : Optional[int] = {
"""title""": """Title""",
"""publish_date""": """Publish date""",
"""authors""": """Authors""",
"""number_of_pages""": """Number of pages:""",
"""first_sentence""": """First sentence""",
"""isbn_10""": """ISBN (10)""",
"""isbn_13""": """ISBN (13)""",
}
_UpperCAmelCase : str = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
_UpperCAmelCase : List[str] = [
get_openlibrary_data(author["""key"""] )["""name"""] for author in data["""Authors"""]
]
_UpperCAmelCase : Tuple = data["""First sentence"""]["""value"""]
for key, value in data.items():
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : Optional[int] = """, """.join(lowerCAmelCase_ )
return data
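# Editor-added sketch: how the two helpers above compose for a single lookup
# (requires network access; the title shown is what Open Library returns for the
# module's default ISBN at the time of writing, so treat it as illustrative):
#
#     book = summarize_book(get_openlibrary_data("isbn/0140328726"))
#     print(book["Title"])  # e.g. "Matilda"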
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
continue
print(F"\nSearching Open Library for ISBN: {isbn}...\n")
try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"Sorry, there are no results for ISBN: {isbn}.")
| 170
|
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()

        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
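# Editor-added sketch: how the patience-based early exit above is typically driven
# at inference time (names follow the classes in this file; the patience value and
# checkpoint are illustrative):
#
#     model = BertForSequenceClassificationWithPabee.from_pretrained("bert-base-uncased", num_labels=2)
#     model.bert.set_patience(3)   # exit once 3 consecutive internal classifiers agree
#     model.bert.reset_stats()
#     ...                          # run evaluation batches through model(...)
#     model.bert.log_stats()       # prints the average number of layers actually used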
| 170
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_plbart'] = ['PLBartTokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_plbart'] = [
        'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PLBartForCausalLM',
        'PLBartForConditionalGeneration',
        'PLBartForSequenceClassification',
        'PLBartModel',
        'PLBartPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 83
|
'''simple docstring'''
def sum_of_digits(n: int) -> int:
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f'{func.__name__}({value})'
        timing = timeit(f'__main__.{call}', setup='import __main__')
        print(f'{call:56} = {func(value)} -- {timing:.4f} seconds')

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
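# Editor-added check (not in the original file): the three implementations above
# must agree; e.g. 262144 -> 2 + 6 + 2 + 1 + 4 + 4 = 19.
def _check_digit_sums() -> None:
    assert sum_of_digits(262144) == sum_of_digits_recursion(262144) == sum_of_digits_compact(262144) == 19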
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 83
| 1
|
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location='cpu')
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location='cpu')['model']

    # pop unnecessary weights
    keys_to_delete = [
        'decoder.version',
        'decoder.output_projection.weight',
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        'decoder.project_in_dim.weight': 'decoder.project_in.weight',
        'decoder.project_out_dim.weight': 'decoder.project_out.weight',
        'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight',
        'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV into separate Q, K, V tensors
            q_name = key.replace('.qkv_proj.', '.q_proj.')
            k_name = key.replace('.qkv_proj.', '.k_proj.')
            v_name = key.replace('.qkv_proj.', '.v_proj.')

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores the QKV weight separated in K,V,Q order despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
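# Editor-added sketch of the QKV split performed above: a fused projection of shape
# [3 * hidden, hidden] is cut into three [hidden, hidden] blocks along dim 0. The
# tensor is a stand-in, not a real checkpoint weight.
def _demo_qkv_split(hidden: int = 8) -> None:
    fused = torch.randn(3 * hidden, hidden)
    k, v, q = torch.split(fused, fused.shape[0] // 3, dim=0)
    assert q.shape == k.shape == v.shape == (hidden, hidden)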
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 364
|
def stooge_sort(arr):
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr, i, h):
    if i >= h:
        return

    # If first element is smaller than the last then swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (int)((h - i + 1) / 3)

        # Recursively sort first 2/3 elements
        stooge(arr, i, (h - t))

        # Recursively sort last 2/3 elements
        stooge(arr, i + t, (h))

        # Recursively sort first 2/3 elements
        stooge(arr, i, (h - t))
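# Editor's note: stooge sort runs in O(n^(log 3 / log 1.5)) ~ O(n^2.71), slower than
# bubble sort, so it is of study interest only. A quick runnable check:
def _check_stooge_sort() -> None:
    assert stooge_sort([18.1, 0, -7, -1, 2, 2]) == [-7, -1, 0, 2, 2, 18.1]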
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
| 307
| 0
|
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"


class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_file)

        # Using a specific revision to test the full commit hash.
        new_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(new_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)

        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")

        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist.
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))

            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
| 88
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 203
| 0
|
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
UpperCAmelCase_ : Dict = """\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
"""
UpperCAmelCase_ : List[Any] = """\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper \"Evaluating Large Language Models Trained on Code\"
(https://arxiv.org/abs/2107.03374).
"""
UpperCAmelCase_ : int = """
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric(\"code_eval\")
>>> test_cases = [\"assert add(2,3)==5\"]
>>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{'pass@1': 0.5, 'pass@2': 1.0}
"""
UpperCAmelCase_ : List[str] = """
################################################################################
!!!WARNING!!!
################################################################################
The \"code_eval\" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper \"Evaluating Large
Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can do this
with:
>>> import os
>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"
################################################################################\
"""
UpperCAmelCase_ : int = """The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the \"Software\"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores"""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
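# Editor-added worked example for the estimator above: with n=5 samples of which
# c=2 pass, pass@2 = 1 - C(3,2)/C(5,2) = 1 - 3/10 = 0.7.
def _check_pass_at_k() -> None:
    assert np.allclose(estimate_pass_at_k(np.array([5]), np.array([2]), 2), [0.7])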
| 360
|
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]

    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1  # index of the "[s]" token
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2  # index of the "#" token
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102  # index of the "[SEP]" token
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
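# Editor-added sketch: typical end-to-end use of the processor above (the checkpoint
# name and model class follow the MGP-STR release; treat the wiring as illustrative):
#
#     processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#     pixel_values = processor(images=image, return_tensors="pt").pixel_values
#     outputs = model(pixel_values)  # an MgpstrForSceneTextRecognition instance
#     text = processor.batch_decode(outputs.logits)["generated_text"]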
| 62
| 0
|
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}

COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data: str = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")

    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
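# Editor-added illustration: the cipher above is a repeating-key XOR, so applying
# the same key again decrypts. A tiny runnable check built from a known plaintext:
def _check_try_key() -> None:
    ciphertext = [ord("h") ^ ord("a"), ord("i") ^ ord("b")]
    assert try_key(ciphertext, (ord("a"), ord("b"))) == "hi"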
if __name__ == "__main__":
print(F'''{solution() = }''')
| 4
|
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
| 226
| 0
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    predicted_image_embedding: torch.FloatTensor
class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)

        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = {}
        def fn_recursive_add_processors(name , module , processors ):
if hasattr(__a , 'set_processor' ):
_snake_case = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'{name}.{sub_name}' , __a , __a )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(__a , __a , __a )
return processors
    def lowerCamelCase ( self , processor ):
"""simple docstring"""
_snake_case = len(self.attn_processors.keys() )
if isinstance(__a , __a ) and len(__a ) != count:
raise ValueError(
F'A dict of processors was passed, but the number of processors {len(__a )} does not match the'
F' number of attention layers: {count}. Please make sure to pass {count} processor classes.' )
        def fn_recursive_attn_processor(name , module , processor ):
if hasattr(__a , 'set_processor' ):
if not isinstance(__a , __a ):
module.set_processor(__a )
else:
module.set_processor(processor.pop(F'{name}.processor' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'{name}.{sub_name}' , __a , __a )
for name, module in self.named_children():
fn_recursive_attn_processor(__a , __a , __a )
def lowerCamelCase ( self ):
"""simple docstring"""
self.set_attn_processor(AttnProcessor() )
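    # Forward: embed the timestep, project the conditioning embeddings, build the
    # token sequence (text tokens, projected embedding, time token, hidden states,
    # optional learned "prd" token), run the blocks under the causal mask, and read
    # the predicted image embedding off the final position.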
    def lowerCamelCase ( self , hidden_states , timestep , proj_embedding , encoder_hidden_states = None , attention_mask = None , return_dict = True , ):
"""simple docstring"""
_snake_case = hidden_states.shape[0]
_snake_case = timestep
if not torch.is_tensor(__a ):
_snake_case = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(__a ) and len(timesteps.shape ) == 0:
_snake_case = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_snake_case = timesteps * torch.ones(__a , dtype=timesteps.dtype , device=timesteps.device )
_snake_case = self.time_proj(__a )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_snake_case = timesteps_projected.to(dtype=self.dtype )
_snake_case = self.time_embedding(__a )
if self.embedding_proj_norm is not None:
_snake_case = self.embedding_proj_norm(__a )
_snake_case = self.embedding_proj(__a )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_snake_case = self.encoder_hidden_states_proj(__a )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
_snake_case = self.proj_in(__a )
_snake_case = self.positional_embedding.to(hidden_states.dtype )
_snake_case = []
_snake_case = 0
if encoder_hidden_states is not None:
additional_embeds.append(__a )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_snake_case = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_snake_case = hidden_states[:, None, :]
_snake_case = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_snake_case = self.prd_embedding.to(hidden_states.dtype ).expand(__a , -1 , -1 )
additional_embeds.append(__a )
_snake_case = torch.cat(
__a , dim=1 , )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
_snake_case = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_snake_case = F.pad(
__a , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
_snake_case = hidden_states + positional_embeddings
if attention_mask is not None:
_snake_case = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
_snake_case = F.pad(__a , (0, self.additional_embeddings) , value=0.0 )
_snake_case = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_snake_case = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
_snake_case = self.norm_in(__a )
for block in self.transformer_blocks:
_snake_case = block(__a , attention_mask=__a )
_snake_case = self.norm_out(__a )
if self.prd_embedding is not None:
_snake_case = hidden_states[:, -1]
else:
_snake_case = hidden_states[:, additional_embeddings_len:]
_snake_case = self.proj_to_clip_embeddings(__a )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=__a )
    def lowerCamelCase ( self , prior_latents ):
"""simple docstring"""
_snake_case = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 362
|
'''simple docstring'''
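# Greedy 0/1 knapsack approximation: items are sorted by a caller-supplied key and
# taken whole while the weight budget allows.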
class Things:
    def __init__( self , name , value , weight ):
        """simple docstring"""
        self.name = name
        self.value = value
        self.weight = weight
    def __repr__( self ):
        """simple docstring"""
        return F'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'
    def get_value( self ):
        """simple docstring"""
        return self.value
    def get_name( self ):
        """simple docstring"""
        return self.name
    def get_weight( self ):
        """simple docstring"""
        return self.weight
    def value_weight( self ):
        """simple docstring"""
        return self.value / self.weight
def build_menu( name , value , weight ) -> list:
    menu = []
    for i in range(len(value ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu
def greedy( items , max_cost , key_func ) -> tuple:
    items_copy = sorted(items , key=key_func , reverse=True )
    result = []
    total_value , total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def test_greedy() -> None:
    pass
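# Minimal usage sketch (the menu data here is illustrative, not from the original):
#   foods = build_menu(["burger", "salad"], [80, 60], [40, 10])
#   taken, total_value = greedy(foods, 50, Things.get_value)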
if __name__ == "__main__":
import doctest
doctest.testmod()
| 160
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowercase : Optional[Any] =logging.get_logger(__name__)
_lowercase : Dict ={"vocab_file": "sentencepiece.bpe.model"}
_lowercase : Optional[Any] ={
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
}
_lowercase : Optional[int] ={
"moussaKam/mbarthez": 1024,
"moussaKam/barthez": 1024,
"moussaKam/barthez-orangesum-title": 1024,
}
_lowercase : Any ="▁"
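# SentencePiece BPE tokenizer for (m)BARThez; fairseq-style special-token ids
# (<s>=0, <pad>=1, </s>=2, <unk>=3) are layered on top of the SentencePiece vocab.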
class BarthezTokenizer (PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs = None , **kwargs , ) -> None:
        """simple docstring"""
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.fairseq_tokens_to_ids = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
        self.fairseq_tokens_to_ids["""<mask>"""] = len(self.sp_model ) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
@property
    def vocab_size( self ) -> int:
        """simple docstring"""
        return len(self.sp_model )
    def get_vocab( self ) -> Dict:
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text ) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        return spm_id if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ) -> str:
        """simple docstring"""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ) -> str:
        """simple docstring"""
        current_sub_tokens = []
        out_string = """"""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def __getstate__( self ) -> Union[str, Any]:
        """simple docstring"""
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self , d ) -> Any:
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 170
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : int =logging.get_logger(__name__)
_lowercase : Optional[Any] ={"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
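# Configuration for the original OpenAI GPT; attribute_map aliases the modern names
# (hidden_size, num_attention_heads, ...) onto GPT's n_embd/n_head/n_layer.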
class OpenAIGPTConfig (PretrainedConfig ):
    """simple docstring"""
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__( self , vocab_size=4_0_4_7_8 , n_positions=5_1_2 , n_embd=7_6_8 , n_layer=1_2 , n_head=1_2 , afn="gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.0_2 , summary_type="cls_index" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , **kwargs , ) -> Optional[int]:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
| 170
| 1
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase : str = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {
"""SenseTime/deformable-detr""": """https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json""",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
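# Deformable DETR replaces DETR's dense attention with multi-scale deformable
# attention; num_feature_levels and encoder/decoder_n_points control the sampling.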
class DeformableDetrConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''deformable_detr'''
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=300 , max_position_embeddings=1_024 , encoder_layers=6 , encoder_ffn_dim=1_024 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=1_024 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , return_intermediate=True , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , num_feature_levels=4 , encoder_n_points=4 , decoder_n_points=4 , two_stage=False , two_stage_num_proposals=300 , with_box_refine=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , focal_alpha=0.25 , disable_custom_kernels=False , **kwargs , ):
        """simple docstring"""
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
                backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get("""model_type""" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self ):
        """simple docstring"""
        return self.encoder_attention_heads
    @property
    def hidden_size( self ):
        """simple docstring"""
        return self.d_model
    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
| 365
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : int = {
"""salesforce/blip2-opt-2.7b""": """https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json""",
}
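# BLIP-2 couples a frozen vision encoder and a frozen language model through a
# lightweight Q-Former; each component below has its own config class.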
class BlipaVisionConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "blip_2_vision_model"
    def __init__( self , hidden_size=1_408 , intermediate_size=6_144 , num_hidden_layers=39 , num_attention_heads=16 , image_size=224 , patch_size=14 , hidden_act="gelu" , layer_norm_eps=0.00_001 , attention_dropout=0.0 , initializer_range=1e-1_0 , qkv_bias=True , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("""model_type""" ) == "blip-2":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class BlipaQFormerConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "blip_2_qformer"
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1e-1_2 , pad_token_id=0 , position_embedding_type="absolute" , cross_attention_frequency=2 , encoder_hidden_size=1_408 , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("""model_type""" ) == "blip-2":
            config_dict = config_dict["""qformer_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class BlipaConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "blip-2"
    is_composition = True
    def __init__( self , vision_config=None , qformer_config=None , text_config=None , num_query_tokens=32 , **kwargs ):
        """simple docstring"""
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. initializing the Blip2VisionConfig with default values.""" )
        if qformer_config is None:
            qformer_config = {}
            logger.info("""qformer_config is None. Initializing the Blip2QFormerConfig with default values.""" )
        if text_config is None:
            text_config = {}
            logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""" )
        self.vision_config = BlipaVisionConfig(**vision_config )
        self.qformer_config = BlipaQFormerConfig(**qformer_config )
        text_model_type = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
    @classmethod
    def from_vision_qformer_text_configs( cls , vision_config , qformer_config , text_config , **kwargs , ):
        """simple docstring"""
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )
    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""qformer_config"""] = self.qformer_config.to_dict()
        output["""text_config"""] = self.text_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
| 168
| 0
|
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
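# Model tester below builds tiny BiT configs/inputs and exercises shape checks for
# BitModel, BitForImageClassification and BitBackbone.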
class BitModelTester :
'''simple docstring'''
    def __init__( self : List[Any] ,parent ,batch_size=3 ,image_size=32 ,num_channels=3 ,embeddings_size=10 ,hidden_sizes=[8, 16, 32, 64] ,depths=[1, 1, 2, 1] ,is_training=True ,use_labels=True ,hidden_act="relu" ,num_labels=3 ,scope=None ,out_features=["stage2", "stage3", "stage4"] ,out_indices=[2, 3, 4] ,num_groups=1 ,):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] ,self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
return BitConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,out_features=self.out_features ,out_indices=self.out_indices ,num_groups=self.num_groups ,)
    def create_and_check_model( self ,config ,pixel_values ,labels ):
        model = BitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
    def create_and_check_for_image_classification( self ,config ,pixel_values ,labels ):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values ,labels=labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def create_and_check_backbone( self ,config ,pixel_values ,labels ):
        model = BitBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
        self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) ,1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) ,1 )
        self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
snake_case_ = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
    def setUp( self ):
        self.model_tester = BitModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=BitConfig ,has_text_modality=False )
def UpperCamelCase_ ( self : Optional[int] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self : Optional[int] ):
return
@unittest.skip(reason="Bit does not output attentions" )
def UpperCamelCase_ ( self : Any ):
pass
@unittest.skip(reason="Bit does not use inputs_embeds" )
def UpperCamelCase_ ( self : List[Any] ):
pass
@unittest.skip(reason="Bit does not support input and output embeddings" )
def UpperCamelCase_ ( self : List[str] ):
pass
def UpperCamelCase_ ( self : Union[str, Any] ):
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A = model_class(A )
__A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A = [*signature.parameters.keys()]
__A = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,A )
def UpperCamelCase_ ( self : Tuple ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self : Optional[Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*A )
def UpperCamelCase_ ( self : List[str] ):
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A = model_class(config=A )
for name, module in model.named_modules():
if isinstance(A ,(nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) ,msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' ,)
self.assertTrue(
torch.all(module.bias == 0 ) ,msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' ,)
def UpperCamelCase_ ( self : Tuple ):
        def check_hidden_states_output(inputs_dict ,config ,model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict ,model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) ,expected_num_stages + 1 )
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict ,config ,model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict ,config ,model_class )
@unittest.skip(reason="Bit does not use feedforward chunking" )
def UpperCamelCase_ ( self : Optional[int] ):
pass
def UpperCamelCase_ ( self : int ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@slow
def UpperCamelCase_ ( self : List[str] ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A = BitModel.from_pretrained(A )
self.assertIsNotNone(A )
def UpperCAmelCase ( ) -> int:
"""simple docstring"""
__A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self : List[str] ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def UpperCamelCase_ ( self : Any ):
__A = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(A )
__A = self.default_image_processor
__A = prepare_img()
__A = image_processor(images=A ,return_tensors="pt" ).to(A )
# forward pass
with torch.no_grad():
__A = model(**A )
# verify the logits
__A = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape ,A )
__A = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,A ,atol=1E-4 ) )
@require_torch
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = (BitBackbone,) if is_torch_available() else ()
snake_case_ = BitConfig
snake_case_ = False
    def setUp( self ):
        self.model_tester = BitModelTester(self )
| 15
|
from math import isclose, sqrt
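# Project Euler 144: count reflections of a laser inside the ellipse 4x^2 + y^2 = 100.
# Each bounce mirrors the beam's gradient about the ellipse normal (double-angle
# identities give the sin/cos terms), and the next hit point is the other root of
# the line-ellipse quadratic.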
def next_point(point_x: float , point_y: float , incoming_gradient: float ) -> tuple[float, float, float]:
    """simple docstring"""
    normal_gradient = point_y / 4 / point_x
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus , point_x ) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
def solution(first_x_coord: float = 1.4 , first_y_coord: float = -9.6 ) -> int:
    """simple docstring"""
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x , point_y , gradient = next_point(point_x , point_y , gradient )
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
print(f'''{solution() = }''')
| 307
| 0
|
"""simple docstring"""
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
SCREAMING_SNAKE_CASE__ = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SCREAMING_SNAKE_CASE__ = get_tests_dir("fixtures/vocab.json")
SCREAMING_SNAKE_CASE__ = get_tests_dir("fixtures")
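# Exercises AutoProcessor resolution: from saved folders and model configs, from the
# Hub with trust_remote_code, and via the register() API for custom classes.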
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase : Optional[int] = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
def snake_case ( self ):
"""simple docstring"""
snake_case = 0
def snake_case ( self ):
"""simple docstring"""
snake_case = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = WavaVecaConfig()
snake_case = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' )
# save in new folder
model_config.save_pretrained(lowerCAmelCase )
processor.save_pretrained(lowerCAmelCase )
snake_case = AutoProcessor.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(lowerCAmelCase , os.path.join(lowerCAmelCase , lowerCAmelCase ) )
copyfile(lowerCAmelCase , os.path.join(lowerCAmelCase , 'vocab.json' ) )
snake_case = AutoProcessor.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = WavaVecaFeatureExtractor()
snake_case = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' )
snake_case = WavaVecaProcessor(lowerCAmelCase , lowerCAmelCase )
# save in new folder
processor.save_pretrained(lowerCAmelCase )
# drop `processor_class` in tokenizer
with open(os.path.join(lowerCAmelCase , lowerCAmelCase ) , 'r' ) as f:
snake_case = json.load(lowerCAmelCase )
config_dict.pop('processor_class' )
with open(os.path.join(lowerCAmelCase , lowerCAmelCase ) , 'w' ) as f:
f.write(json.dumps(lowerCAmelCase ) )
snake_case = AutoProcessor.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = WavaVecaFeatureExtractor()
snake_case = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' )
snake_case = WavaVecaProcessor(lowerCAmelCase , lowerCAmelCase )
# save in new folder
processor.save_pretrained(lowerCAmelCase )
# drop `processor_class` in feature extractor
with open(os.path.join(lowerCAmelCase , lowerCAmelCase ) , 'r' ) as f:
snake_case = json.load(lowerCAmelCase )
config_dict.pop('processor_class' )
with open(os.path.join(lowerCAmelCase , lowerCAmelCase ) , 'w' ) as f:
f.write(json.dumps(lowerCAmelCase ) )
snake_case = AutoProcessor.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = WavaVecaConfig(processor_class='Wav2Vec2Processor' )
model_config.save_pretrained(lowerCAmelCase )
# copy relevant files
copyfile(lowerCAmelCase , os.path.join(lowerCAmelCase , 'vocab.json' ) )
# create emtpy sample processor
with open(os.path.join(lowerCAmelCase , lowerCAmelCase ) , 'w' ) as f:
f.write('{}' )
snake_case = AutoProcessor.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
with self.assertRaises(lowerCAmelCase ):
snake_case = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCAmelCase ):
snake_case = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=lowerCAmelCase )
snake_case = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' , trust_remote_code=lowerCAmelCase )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
snake_case = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
snake_case = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
# Test we can also load the slow version
snake_case = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=lowerCAmelCase , use_fast=lowerCAmelCase )
snake_case = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , 'NewTokenizer' )
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
def snake_case ( self ):
"""simple docstring"""
try:
AutoConfig.register('custom' , lowerCAmelCase )
AutoFeatureExtractor.register(lowerCAmelCase , lowerCAmelCase )
AutoTokenizer.register(lowerCAmelCase , slow_tokenizer_class=lowerCAmelCase )
AutoProcessor.register(lowerCAmelCase , lowerCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCAmelCase ):
AutoProcessor.register(lowerCAmelCase , lowerCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
snake_case = CustomFeatureExtractor.from_pretrained(lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case = os.path.join(lowerCAmelCase , 'vocab.txt' )
with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
snake_case = CustomTokenizer(lowerCAmelCase )
snake_case = CustomProcessor(lowerCAmelCase , lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(lowerCAmelCase )
snake_case = AutoProcessor.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def snake_case ( self ):
"""simple docstring"""
class lowerCAmelCase_ ( lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : List[Any] = False
class lowerCAmelCase_ ( lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : str = False
class lowerCAmelCase_ ( lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = """AutoFeatureExtractor"""
_lowerCAmelCase : Dict = """AutoTokenizer"""
_lowerCAmelCase : Optional[Any] = False
try:
AutoConfig.register('custom' , lowerCAmelCase )
AutoFeatureExtractor.register(lowerCAmelCase , lowerCAmelCase )
AutoTokenizer.register(lowerCAmelCase , slow_tokenizer_class=lowerCAmelCase )
AutoProcessor.register(lowerCAmelCase , lowerCAmelCase )
# If remote code is not set, the default is to use local classes.
snake_case = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
snake_case = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=lowerCAmelCase )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
snake_case = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=lowerCAmelCase )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def snake_case ( self ):
"""simple docstring"""
snake_case = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(processor.__class__.__name__ , 'BertTokenizerFast' )
def snake_case ( self ):
"""simple docstring"""
snake_case = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-convnext' )
self.assertEqual(processor.__class__.__name__ , 'ConvNextImageProcessor' )
@is_staging_test
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
@classmethod
def snake_case ( cls ):
"""simple docstring"""
snake_case = TOKEN
HfFolder.save_token(lowerCAmelCase )
@classmethod
def snake_case ( cls ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='test-processor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-processor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-processor' )
except HTTPError:
pass
def snake_case ( self ):
"""simple docstring"""
snake_case = WavaVecaProcessor.from_pretrained(lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(lowerCAmelCase , 'test-processor' ) , push_to_hub=lowerCAmelCase , use_auth_token=self._token )
snake_case = WavaVecaProcessor.from_pretrained(F"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase , getattr(new_processor.feature_extractor , lowerCAmelCase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def snake_case ( self ):
"""simple docstring"""
snake_case = WavaVecaProcessor.from_pretrained(lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(lowerCAmelCase , 'test-processor-org' ) , push_to_hub=lowerCAmelCase , use_auth_token=self._token , organization='valid_org' , )
snake_case = WavaVecaProcessor.from_pretrained('valid_org/test-processor-org' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase , getattr(new_processor.feature_extractor , lowerCAmelCase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def snake_case ( self ):
"""simple docstring"""
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
snake_case = CustomFeatureExtractor.from_pretrained(lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case = os.path.join(lowerCAmelCase , 'vocab.txt' )
with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
snake_case = CustomTokenizer(lowerCAmelCase )
snake_case = CustomProcessor(lowerCAmelCase , lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F"""{USER}/test-dynamic-processor""" , token=self._token )
snake_case = Repository(lowerCAmelCase , clone_from=F"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(lowerCAmelCase )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor',
'AutoProcessor': 'custom_processing.CustomProcessor',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(lowerCAmelCase , 'tokenizer_config.json' ) ) as f:
snake_case = json.load(lowerCAmelCase )
self.assertDictEqual(
tokenizer_config['auto_map'] , {
'AutoTokenizer': ['custom_tokenization.CustomTokenizer', None],
'AutoProcessor': 'custom_processing.CustomProcessor',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(lowerCAmelCase , 'custom_feature_extraction.py' ) ) )
self.assertTrue(os.path.isfile(os.path.join(lowerCAmelCase , 'custom_tokenization.py' ) ) )
self.assertTrue(os.path.isfile(os.path.join(lowerCAmelCase , 'custom_processing.py' ) ) )
repo.push_to_hub()
snake_case = AutoProcessor.from_pretrained(F"""{USER}/test-dynamic-processor""" , trust_remote_code=lowerCAmelCase )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , 'CustomProcessor' )
| 149
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
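# Visited-list loop detection: iteration records every node seen and raises
# ContainsLoopError on a repeat, so has_loop simply tries to exhaust the iterator.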
class ContainsLoopError(Exception ):
    """simple docstring"""
    pass
class Node :
    """simple docstring"""
    def __init__( self , data ):
        """simple docstring"""
        self.data = data
        self.next_node = None
    def __iter__( self ):
        """simple docstring"""
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node )
            yield node.data
            node = node.next_node
    @property
    def has_loop( self ):
        """simple docstring"""
        try:
            list(self )
            return False
        except ContainsLoopError:
            return True
if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop) # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop) # True
    root_node.next_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node.next_node = Node(6)
    root_node.next_node.next_node.next_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop) # False
    root_node = Node(1)
    print(root_node.has_loop) # False
| 149
| 1
|
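# Digit sum of 2**power (Project Euler 16): repeatedly strip the last base-10 digit.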
def solution(power: int = 1_000 ):
    '''simple docstring'''
    n = 2**power
    r = 0
    while n:
        r , n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 30
|
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
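# Small linear-algebra toolkit: Vector with overloaded +, -, * (scalar and dot
# product) and Matrix with a Laplace-expansion determinant via minors/cofactors.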
class Vector :
    """simple docstring"""
    def __init__( self , components = None ) -> None:
        if components is None:
            components = []
        self.__components = list(components )
    def __len__( self ) -> int:
        return len(self.__components )
    def __str__( self ) -> str:
        return "(" + ",".join(map(str , self.__components ) ) + ")"
    def __add__( self , other ) -> Vector:
        size = len(self )
        if size == len(other ):
            result = [self.__components[i] + other.component(i ) for i in range(size )]
            return Vector(result )
        else:
            raise Exception('must have the same size' )
    def __sub__( self , other ) -> Vector:
        size = len(self )
        if size == len(other ):
            result = [self.__components[i] - other.component(i ) for i in range(size )]
            return Vector(result )
        else: # error case
            raise Exception('must have the same size' )
    @overload
    def __mul__( self , other ) -> Vector:
        ...
    @overload
    def __mul__( self , other ) -> float:
        ...
    def __mul__( self , other ) -> float | Vector:
        if isinstance(other , (float, int) ):
            ans = [c * other for c in self.__components]
            return Vector(ans )
        elif isinstance(other , Vector ) and len(self ) == len(other ):
            size = len(self )
            prods = [self.__components[i] * other.component(i ) for i in range(size )]
            return sum(prods )
        else: # error case
            raise Exception('invalid operand!' )
    def copy( self ) -> Vector:
        return Vector(self.__components )
    def component( self , i ) -> float:
        if isinstance(i , int ) and -len(self.__components ) <= i < len(self.__components ):
            return self.__components[i]
        else:
            raise Exception('index out of range' )
    def change_component( self , pos , value ) -> None:
        assert -len(self.__components ) <= pos < len(self.__components )
        self.__components[pos] = value
    def euclidean_length( self ) -> float:
        if len(self.__components ) == 0:
            raise Exception('Vector is empty' )
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares ) )
    def angle( self , other , deg = False ) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den ) )
        else:
            return math.acos(num / den )
def zero_vector( dimension : int ) -> Vector:
    assert isinstance(dimension , int )
    return Vector([0] * dimension )
def unit_basis_vector( dimension : int , pos : int ) -> Vector:
    assert isinstance(dimension , int ) and (isinstance(pos , int ))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans )
def axpy( scalar : float , x : Vector , y : Vector ) -> Vector:
    assert (
        isinstance(x , Vector )
        and isinstance(y , Vector )
        and (isinstance(scalar , (int, float) ))
    )
    return x * scalar + y
def random_vector( n : int , a : int , b : int ) -> Vector:
    random.seed(None )
    ans = [random.randint(a , b ) for _ in range(n )]
    return Vector(ans )
class Matrix :
    """simple docstring"""
    def __init__( self , matrix , w , h ) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h
    def __str__( self ) -> str:
        ans = ''
        for i in range(self.__height ):
            ans += "|"
            for j in range(self.__width ):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j] ) + ","
                else:
                    ans += str(self.__matrix[i][j] ) + "|\n"
        return ans
    def __add__( self , other ) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height ):
                row = [
                    self.__matrix[i][j] + other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
        else:
            raise Exception('matrix must have the same dimension!' )
    def __sub__( self , other ) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height ):
                row = [
                    self.__matrix[i][j] - other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
        else:
            raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self , A_ ) -> Matrix:
...
@overload
def __mul__( self , A_ ) -> Vector:
...
    def __mul__( self , other ) -> Vector | Matrix:
        if isinstance(other , Vector ): # matrix-vector
            if len(other ) == self.__width:
                ans = zero_vector(self.__height )
                for i in range(self.__height ):
                    prods = [
                        self.__matrix[i][j] * other.component(j )
                        for j in range(self.__width )
                    ]
                    ans.change_component(i , sum(prods ) )
                return ans
            else:
                raise Exception(
                    'vector must have the same size as the '
                    'number of columns of the matrix!' )
        elif isinstance(other , (int, float) ): # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width )]
                for i in range(self.__height )
            ]
            return Matrix(matrix , self.__width , self.__height )
        return None
    def height( self ) -> int:
        return self.__height
    def width( self ) -> int:
        return self.__width
    def component( self , x , y ) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception('change_component: indices out of bounds' )
    def change_component( self , x , y , value ) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception('change_component: indices out of bounds' )
    def minor( self , x , y ) -> float:
        if self.__height != self.__width:
            raise Exception('Matrix is not square' )
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor ) ):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor , self.__width - 1 , self.__height - 1 ).determinant()
    def cofactor( self , x , y ) -> float:
        if self.__height != self.__width:
            raise Exception('Matrix is not square' )
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x , y )
        else:
            raise Exception('Indices out of bounds' )
    def determinant( self ) -> float:
        if self.__height != self.__width:
            raise Exception('Matrix is not square' )
        if self.__height < 1:
            raise Exception('Matrix has no element' )
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0 , y ) for y in range(self.__width )
            ]
            return sum(cofactor_prods )
def square_zero_matrix( n : int ) -> Matrix:
    ans = [[0] * n for _ in range(n )]
    return Matrix(ans , n , n )
def random_matrix( height : int , width : int , a : int , b : int ) -> Matrix:
    random.seed(None )
    matrix = [
        [random.randint(a , b ) for _ in range(width )] for _ in range(height )
    ]
    return Matrix(matrix , width , height )
| 62
| 0
|
"""simple docstring"""
def __UpperCAmelCase ( UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : int = 10_00 ) -> int:
'''simple docstring'''
__snake_case : Any = 1
__snake_case : List[str] = 0
for divide_by_number in range(UpperCAmelCase_ , digit + 1 ):
__snake_case : list[int] = []
__snake_case : str = numerator
for _ in range(1 , digit + 1 ):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided ):
                    __snake_case : Dict = len(has_been_divided )
                    __snake_case : Any = divide_by_number
            else:
                has_been_divided.append(now_divide )
                __snake_case : List[str] = now_divide * 10 % divide_by_number
return the_digit
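# Worked check (assuming the function returns the denominator in
# [numerator, digit] whose unit fraction has the longest repeating decimal
# cycle): with digit = 10, 1/7 = 0.(142857) has the longest cycle, so 7 wins.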
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 95
|
"""simple docstring"""
from __future__ import annotations
import os
from typing import Any
import requests
_a : int= "https://api.github.com"
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
_a : Dict= BASE_URL + "/user"
# https://github.com/settings/tokens
_a : Union[str, Any]= os.environ.get("USER_TOKEN", "")
def __UpperCAmelCase ( UpperCAmelCase_ : str ) -> dict[Any, Any]:
'''simple docstring'''
__snake_case : Tuple = {
'Authorization': F"token {auth_token}",
'Accept': 'application/vnd.github.v3+json',
}
return requests.get(UpperCAmelCase_ , headers=UpperCAmelCase_ ).json()
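# Usage sketch: export a personal access token before running, e.g.
#   USER_TOKEN=ghp_xxxx python fetch_github_info.py
# (the token value and script filename above are hypothetical)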
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'''{key}: {value}''')
else:
raise ValueError("'USER_TOKEN' field cannot be empty.")
| 95
| 1
|
'''simple docstring'''
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def a ( __a ) -> Any:
'''simple docstring'''
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def a ( ) -> List[Any]:
'''simple docstring'''
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def a ( ) -> Dict:
'''simple docstring'''
UpperCamelCase__ :List[Any] = '''mock-s3-bucket'''
UpperCamelCase__ :int = f'''s3://{mock_bucket}'''
UpperCamelCase__ :List[str] = extract_path_from_uri(__a )
assert dataset_path.startswith('''s3://''' ) is False
UpperCamelCase__ :Optional[int] = '''./local/path'''
UpperCamelCase__ :Union[str, Any] = extract_path_from_uri(__a )
assert dataset_path == new_dataset_path
def a ( __a ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = is_remote_filesystem(__a )
assert is_remote is True
UpperCamelCase__ :Union[str, Any] = fsspec.filesystem('''file''' )
UpperCamelCase__ :Optional[int] = is_remote_filesystem(__a )
assert is_remote is False
@pytest.mark.parametrize('''compression_fs_class''' , __a )
def a ( __a , __a , __a , __a , __a , __a , __a ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ :Tuple = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file}
UpperCamelCase__ :Union[str, Any] = input_paths[compression_fs_class.protocol]
if input_path is None:
UpperCamelCase__ :Optional[int] = f'''for \'{compression_fs_class.protocol}\' compression protocol, '''
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason )
UpperCamelCase__ :Dict = fsspec.filesystem(compression_fs_class.protocol , fo=__a )
    assert isinstance(fs , compression_fs_class )
UpperCamelCase__ :str = os.path.basename(__a )
UpperCamelCase__ :Optional[Any] = expected_filename[: expected_filename.rindex('''.''' )]
assert fs.glob('''*''' ) == [expected_filename]
with fs.open(__a , '''r''' , encoding='''utf-8''' ) as f, open(__a , encoding='''utf-8''' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] )
def a ( __a , __a , __a ) -> Dict:
'''simple docstring'''
UpperCamelCase__ :Tuple = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path}
UpperCamelCase__ :Any = compressed_file_paths[protocol]
UpperCamelCase__ :Union[str, Any] = '''dataset.jsonl'''
UpperCamelCase__ :str = f'''{protocol}://{member_file_path}::{compressed_file_path}'''
UpperCamelCase__ , *UpperCamelCase__ :Dict = fsspec.get_fs_token_paths(__a )
    assert fs.isfile(member_file_path )
assert not fs.isfile('''non_existing_''' + member_file_path )
@pytest.mark.integration
def a ( __a , __a , __a , __a ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :Any = hf_api.dataset_info(__a , token=__a )
UpperCamelCase__ :List[str] = HfFileSystem(repo_info=__a , token=__a )
assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"]
assert hffs.isdir('''data''' )
assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' )
with open(__a ) as f:
assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read()
def a ( ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ :List[Any] = '''bz2'''
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(__a , __a , clobber=__a )
with pytest.warns(__a ) as warning_info:
importlib.reload(datasets.filesystems )
    assert len(warning_info ) == 1
assert (
str(warning_info[0].message )
== f'''A filesystem protocol was already set for {protocol} and will be overwritten.'''
)
| 97
|
"""simple docstring"""
from statistics import mean
import numpy as np
def __A ( a_ :list , a_ :list , a_ :list , a_ :int) -> list:
__a : Any = 0
# Number of processes finished
__a : Union[str, Any] = 0
# Displays the finished process.
# If it is 0, the performance is completed if it is 1, before the performance.
__a : Any = [0] * no_of_process
# List to include calculation results
__a : str = [0] * no_of_process
# Sort by arrival time.
    __a : List[Any] = [burst_time[i] for i in np.argsort(arrival_time)]
    __a : Tuple = [process_name[i] for i in np.argsort(arrival_time)]
arrival_time.sort()
while no_of_process > finished_process_count:
__a : Optional[Any] = 0
while finished_process[i] == 1:
i += 1
if current_time < arrival_time[i]:
__a : Dict = arrival_time[i]
__a : Dict = 0
# Index showing the location of the process being performed
__a : Tuple = 0
# Saves the current response ratio.
__a : List[str] = 0
    for i in range(0 , no_of_process):
if finished_process[i] == 0 and arrival_time[i] <= current_time:
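            # HRRN priority: response ratio = (waiting time + burst time) / burst time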
__a : Tuple = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
i
]
if response_ratio < temp:
__a : Tuple = temp
__a : Optional[Any] = i
# Calculate the turn around time
__a : Optional[int] = current_time + burst_time[loc] - arrival_time[loc]
current_time += burst_time[loc]
# Indicates that the process has been performed.
__a : int = 1
# Increase finished_process_count by 1
finished_process_count += 1
return turn_around_time
def __A ( a_ :list , a_ :list , a_ :list , a_ :int) -> list:
__a : Dict = [0] * no_of_process
    for i in range(0 , no_of_process):
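        # waiting time = turn-around time - burst time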
__a : Optional[Any] = turn_around_time[i] - burst_time[i]
return waiting_time
if __name__ == "__main__":
A = 5
A = ['''A''', '''B''', '''C''', '''D''', '''E''']
A = [1, 2, 3, 4, 5]
A = [1, 2, 3, 4, 5]
A = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
A = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''')
for i in range(0, no_of_process):
print(
F'{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'
F'{turn_around_time[i]}\t\t\t{waiting_time[i]}'
)
print(F'average waiting time : {mean(waiting_time):.5f}')
print(F'average turn around time : {mean(turn_around_time):.5f}')
| 160
| 0
|
from math import pi
def _UpperCAmelCase (UpperCamelCase_ : int , UpperCamelCase_ : int ):
'''simple docstring'''
return 2 * pi * radius * (angle / 360)
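# Worked example: arc_length(90, 10) = 2 * pi * 10 * (90 / 360) ≈ 15.70796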
if __name__ == "__main__":
print(arc_length(9_0, 1_0))
| 159
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
_lowerCamelCase : str = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class __snake_case (unittest.TestCase , _a ):
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
'''simple docstring'''
_lowerCAmelCase : List[Any] = load_tool("""text-question-answering""" )
self.tool.setup()
_lowerCAmelCase : Optional[Any] = load_tool("""text-question-answering""" , remote=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.tool(_UpperCAmelCase , """What did Hugging Face do in April 2021?""" )
self.assertEqual(_UpperCAmelCase , """launched the BigScience Research Workshop""" )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.remote_tool(_UpperCAmelCase , """What did Hugging Face do in April 2021?""" )
self.assertEqual(_UpperCAmelCase , """launched the BigScience Research Workshop""" )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.tool(text=_UpperCAmelCase , question="""What did Hugging Face do in April 2021?""" )
self.assertEqual(_UpperCAmelCase , """launched the BigScience Research Workshop""" )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.remote_tool(text=_UpperCAmelCase , question="""What did Hugging Face do in April 2021?""" )
self.assertEqual(_UpperCAmelCase , """launched the BigScience Research Workshop""" )
| 159
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCamelCase : List[Any] = logging.get_logger(__name__)
_UpperCamelCase : Optional[Any] = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : List[str] = "levit"
def __init__( self , a=2_2_4 , a=3 , a=3 , a=2 , a=1 , a=1_6 , a=[1_2_8, 2_5_6, 3_8_4] , a=[4, 8, 1_2] , a=[4, 4, 4] , a=[1_6, 1_6, 1_6] , a=0 , a=[2, 2, 2] , a=[2, 2, 2] , a=0.02 , **a , ) -> Tuple:
super().__init__(**a )
lowercase__ : List[Any] = image_size
lowercase__ : Optional[int] = num_channels
lowercase__ : Tuple = kernel_size
lowercase__ : Any = stride
lowercase__ : str = padding
lowercase__ : Tuple = hidden_sizes
lowercase__ : List[Any] = num_attention_heads
lowercase__ : Dict = depths
lowercase__ : List[str] = key_dim
lowercase__ : Any = drop_path_rate
lowercase__ : Optional[int] = patch_size
lowercase__ : Dict = attention_ratio
lowercase__ : Optional[int] = mlp_ratio
lowercase__ : Any = initializer_range
lowercase__ : Union[str, Any] = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : int = version.parse("1.11")
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _UpperCAmelCase ( self ) -> float:
return 1e-4
| 77
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
a_ : Optional[int] = logging.get_logger(__name__)
a_ : Dict = {
"Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
# See all Marian models at https://huggingface.co/models?filter=marian
}
class a ( _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = """marian"""
_lowerCAmelCase = ["""past_key_values"""]
_lowerCAmelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , __magic_name__=5_81_01 , __magic_name__=None , __magic_name__=10_24 , __magic_name__=12 , __magic_name__=40_96 , __magic_name__=16 , __magic_name__=12 , __magic_name__=40_96 , __magic_name__=16 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=True , __magic_name__=True , __magic_name__="gelu" , __magic_name__=10_24 , __magic_name__=0.1 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.0_2 , __magic_name__=5_81_00 , __magic_name__=False , __magic_name__=5_81_00 , __magic_name__=0 , __magic_name__=0 , __magic_name__=True , **__magic_name__ , ) -> str:
_a = vocab_size
_a = decoder_vocab_size or vocab_size
_a = max_position_embeddings
_a = d_model
_a = encoder_ffn_dim
_a = encoder_layers
_a = encoder_attention_heads
_a = decoder_ffn_dim
_a = decoder_layers
_a = decoder_attention_heads
_a = dropout
_a = attention_dropout
_a = activation_dropout
_a = activation_function
_a = init_std
_a = encoder_layerdrop
_a = decoder_layerdrop
_a = use_cache
_a = encoder_layers
_a = scale_embedding # scale factor will be sqrt(d_model) if True
_a = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=__magic_name__ , eos_token_id=__magic_name__ , is_encoder_decoder=__magic_name__ , decoder_start_token_id=__magic_name__ , forced_eos_token_id=__magic_name__ , **__magic_name__ , )
class a ( _SCREAMING_SNAKE_CASE ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def __UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_a = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_a = {0: 'batch'}
_a = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
_a = {0: 'batch', 1: 'decoder_sequence'}
_a = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(__magic_name__ , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_a = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_a , _a = self.num_layers
for i in range(__magic_name__ ):
_a = {0: 'batch', 2: 'past_sequence + sequence'}
_a = {0: 'batch', 2: 'past_sequence + sequence'}
else:
_a = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def __UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_a = super().outputs
else:
_a = super(__magic_name__ , self ).outputs
if self.use_past:
_a , _a = self.num_layers
for i in range(__magic_name__ ):
_a = {0: 'batch', 2: 'past_sequence + sequence'}
_a = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = False , __magic_name__ = None , ) -> Mapping[str, Any]:
_a = self._generate_dummy_inputs_for_encoder_and_decoder(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# Generate decoder inputs
_a = seq_length if not self.use_past else 1
_a = self._generate_dummy_inputs_for_encoder_and_decoder(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_a = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
_a = dict(**__magic_name__ , **__magic_name__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_a , _a = common_inputs['input_ids'].shape
_a = common_inputs['decoder_input_ids'].shape[1]
_a , _a = self.num_attention_heads
_a = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_a = decoder_seq_length + 3
_a = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_a = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(__magic_name__ , __magic_name__ )] , dim=1 )
_a = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_a , _a = self.num_layers
_a = min(__magic_name__ , __magic_name__ )
_a = max(__magic_name__ , __magic_name__ ) - min_num_layers
_a = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(__magic_name__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(__magic_name__ ),
torch.zeros(__magic_name__ ),
torch.zeros(__magic_name__ ),
torch.zeros(__magic_name__ ),
) )
# TODO: test this.
_a = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(__magic_name__ , __magic_name__ ):
common_inputs["past_key_values"].append((torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) )
return common_inputs
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = False , __magic_name__ = None , ) -> Mapping[str, Any]:
_a = self._generate_dummy_inputs_for_encoder_and_decoder(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_a , _a = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_a = seqlen + 2
_a , _a = self.num_layers
_a , _a = self.num_attention_heads
_a = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_a = common_inputs['attention_mask'].dtype
_a = torch.cat(
[common_inputs['attention_mask'], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 )
_a = [
(torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(__magic_name__ )
]
return common_inputs
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = False , __magic_name__ = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_a = compute_effective_axis_dimension(
__magic_name__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_a = tokenizer.num_special_tokens_to_add(__magic_name__ )
_a = compute_effective_axis_dimension(
__magic_name__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__magic_name__ )
# Generate dummy inputs according to compute batch and sequence
_a = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
_a = dict(tokenizer(__magic_name__ , return_tensors=__magic_name__ ) )
return common_inputs
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = False , __magic_name__ = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_a = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
else:
_a = self._generate_dummy_inputs_for_causal_lm(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
return common_inputs
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Tuple:
if self.task in ["default", "seq2seq-lm"]:
_a = super()._flatten_past_key_values_(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
else:
_a = super(__magic_name__ , self )._flatten_past_key_values_(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
@property
def __UpperCAmelCase ( self ) -> float:
return 1e-4
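# Usage sketch (class and method names above are renamed; the names below are
# assumptions about the intended API):
#   onnx_config = MarianOnnxConfig(model_config, task="seq2seq-lm")
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)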
| 168
| 0
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def A_ ( A__ , A__=False ) -> str:
a__ : Any = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'module.blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'module.blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'module.blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'module.blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'module.blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'module.blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'module.blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'module.blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'module.blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'module.blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
a__ : Tuple = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def A_ ( A__ , A__ , A__=False ) -> Tuple:
for i in range(config.num_hidden_layers ):
if base_model:
a__ : List[str] = ''
else:
a__ : Tuple = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
a__ : List[str] = state_dict.pop(F'module.blocks.{i}.attn.qkv.weight' )
a__ : Optional[Any] = state_dict.pop(F'module.blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
a__ : List[str] = in_proj_weight[
: config.hidden_size, :
]
a__ : List[Any] = in_proj_bias[: config.hidden_size]
a__ : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
a__ : Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
a__ : Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
a__ : str = in_proj_bias[-config.hidden_size :]
def A_ ( A__ ) -> Optional[int]:
a__ : List[str] = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def A_ ( A__ ) -> Optional[int]:
# projection head is used in the self-supervised pre-training in MSN,
# for downstream task it's not needed.
a__ : Dict = [
'module.fc.fc1.weight',
'module.fc.fc1.bias',
'module.fc.bn1.weight',
'module.fc.bn1.bias',
'module.fc.bn1.running_mean',
'module.fc.bn1.running_var',
'module.fc.bn1.num_batches_tracked',
'module.fc.fc2.weight',
'module.fc.fc2.bias',
'module.fc.bn2.weight',
'module.fc.bn2.bias',
'module.fc.bn2.running_mean',
'module.fc.bn2.running_var',
'module.fc.bn2.num_batches_tracked',
'module.fc.fc3.weight',
'module.fc.fc3.bias',
]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def A_ ( A__ , A__ , A__ ) -> int:
a__ : List[Any] = dct.pop(A__ )
a__ : Union[str, Any] = val
def A_ ( A__ , A__ ) -> Dict:
a__ : Tuple = ViTMSNConfig()
a__ : Dict = 1000
    a__ : Optional[Any] = 'huggingface/label-files'
    a__ : str = 'imagenet-1k-id2label.json'
    a__ : Optional[Any] = json.load(open(hf_hub_download(A__ , A__ , repo_type='dataset' ) , 'r' ) )
a__ : Tuple = {int(A__ ): v for k, v in idalabel.items()}
a__ : str = idalabel
a__ : List[str] = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
a__ : int = 384
a__ : Optional[int] = 1536
a__ : int = 6
elif "l16" in checkpoint_url:
a__ : str = 1024
a__ : Any = 4096
a__ : str = 24
a__ : Dict = 16
a__ : Optional[int] = 0.1
elif "b4" in checkpoint_url:
a__ : Tuple = 4
elif "l7" in checkpoint_url:
a__ : Union[str, Any] = 7
a__ : Union[str, Any] = 1024
a__ : Dict = 4096
a__ : Dict = 24
a__ : str = 16
a__ : Tuple = 0.1
a__ : Dict = ViTMSNModel(A__ )
a__ : Optional[Any] = torch.hub.load_state_dict_from_url(A__ , map_location='cpu' )['target_encoder']
a__ : str = ViTImageProcessor(size=config.image_size )
remove_projection_head(A__ )
a__ : str = create_rename_keys(A__ , base_model=A__ )
for src, dest in rename_keys:
rename_key(A__ , A__ , A__ )
read_in_q_k_v(A__ , A__ , base_model=A__ )
model.load_state_dict(A__ )
model.eval()
a__ : int = 'http://images.cocodataset.org/val2017/000000039769.jpg'
a__ : Tuple = Image.open(requests.get(A__ , stream=A__ ).raw )
    a__ : Optional[int] = ViTImageProcessor(
        size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
a__ : Optional[Any] = image_processor(images=A__ , return_tensors='pt' )
# forward pass
torch.manual_seed(2 )
a__ : str = model(**A__ )
a__ : int = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
a__ : List[Any] = torch.tensor([[-1.09_15, -1.48_76, -1.18_09]] )
elif "b16" in checkpoint_url:
a__ : List[Any] = torch.tensor([[14.28_89, -18.90_45, 11.72_81]] )
elif "l16" in checkpoint_url:
a__ : Tuple = torch.tensor([[41.50_28, -22.86_81, 45.64_75]] )
elif "b4" in checkpoint_url:
a__ : Dict = torch.tensor([[-4.38_68, 5.29_32, -0.41_37]] )
else:
a__ : Optional[Any] = torch.tensor([[-0.17_92, -0.64_65, 2.42_63]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , A__ , atol=1E-4 )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(A__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(A__ )
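# Typical invocation (a sketch; the script filename is hypothetical):
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small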
if __name__ == "__main__":
lowercase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
lowercase : str = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 355
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase : str = {
"""configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[Any] = ["""VisionEncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[int] = ["""TFVisionEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : List[str] = ["""FlaxVisionEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
lowercase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
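# With this pattern, importing the package stays cheap: the torch/TF/Flax model
# classes declared above are only loaded by _LazyModule on first attribute access.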
| 225
| 0
|
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
A__: Optional[int] = logging.get_logger(__name__)
A__: Tuple = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
A__: Any = {
'''vocab_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'''
},
'''merges_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'''
},
}
A__: Any = {'''allegro/herbert-base-cased''': 514}
A__: Optional[int] = {}
class _a ( UpperCamelCase__):
"""simple docstring"""
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = HerbertTokenizer
def __init__( self: Tuple , __lowerCamelCase: Union[str, Any]=None , __lowerCamelCase: Optional[Any]=None , __lowerCamelCase: str=None , __lowerCamelCase: int="<s>" , __lowerCamelCase: Dict="<unk>" , __lowerCamelCase: Tuple="<pad>" , __lowerCamelCase: Union[str, Any]="<mask>" , __lowerCamelCase: int="</s>" , **__lowerCamelCase: Tuple , ):
'''simple docstring'''
super().__init__(
__lowerCamelCase , __lowerCamelCase , tokenizer_file=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , sep_token=__lowerCamelCase , **__lowerCamelCase , )
def UpperCAmelCase_ ( self: List[Any] , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ):
'''simple docstring'''
UpperCamelCase__: Dict = [self.cls_token_id]
UpperCamelCase__: Tuple = [self.sep_token_id]
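        # Resulting layout: <s> A </s> for one sequence, <s> A </s> B </s> for a pair.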
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase_ ( self: Union[str, Any] , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None , __lowerCamelCase: bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1]
def UpperCAmelCase_ ( self: List[Any] , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ):
'''simple docstring'''
UpperCamelCase__: Optional[Any] = [self.sep_token_id]
UpperCamelCase__: Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase_ ( self: Optional[int] , __lowerCamelCase: str , __lowerCamelCase: Optional[str] = None ):
'''simple docstring'''
UpperCamelCase__: int = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase )
return tuple(__lowerCamelCase )
| 149
|
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def lowerCAmelCase_ ( A_ ,A_ ,A_ ,A_ ,A_):
UpperCamelCase__: List[str] = cva.getAffineTransform(A_ ,A_)
return cva.warpAffine(A_ ,A_ ,(rows, cols))
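# getAffineTransform solves for the 2x3 matrix that maps the three source
# points onto the three destination points; warpAffine applies it to the image.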
if __name__ == "__main__":
# read original image
A__: Union[str, Any] = cva.imread(
str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''')
)
# turn image in gray scale value
A__: Tuple = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
A__ , A__: List[Any] = gray_img.shape
# set different points to rotate image
A__: Tuple = np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
A__: Dict = np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
A__: Any = np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
A__: Union[str, Any] = np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
# add all rotated images in a list
A__: str = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
A__: Optional[int] = plt.figure(1)
A__: List[str] = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3''']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')
plt.title(titles[i])
plt.axis('''off''')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 149
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
UpperCAmelCase_ = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase_ = TFAutoModel.from_pretrained(_UpperCAmelCase , from_pt=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase_ = AutoModel.from_pretrained(_UpperCAmelCase , from_tf=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
@slow
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
UpperCAmelCase_ = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase_ = TFAutoModelForPreTraining.from_pretrained(_UpperCAmelCase , from_pt=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase_ = AutoModelForPreTraining.from_pretrained(_UpperCAmelCase , from_tf=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
@slow
def lowercase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase_ = TFAutoModelForCausalLM.from_pretrained(_UpperCAmelCase , from_pt=_UpperCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ = TFAutoModelForCausalLM.from_pretrained(
_UpperCAmelCase , output_loading_info=_UpperCAmelCase , from_pt=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase_ = AutoModelForCausalLM.from_pretrained(_UpperCAmelCase , from_tf=_UpperCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ = AutoModelForCausalLM.from_pretrained(
_UpperCAmelCase , output_loading_info=_UpperCAmelCase , from_tf=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
@slow
def lowercase__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase_ = TFAutoModelWithLMHead.from_pretrained(_UpperCAmelCase , from_pt=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase_ = AutoModelWithLMHead.from_pretrained(_UpperCAmelCase , from_tf=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
@slow
def lowercase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase_ = TFAutoModelForMaskedLM.from_pretrained(_UpperCAmelCase , from_pt=_UpperCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ = TFAutoModelForMaskedLM.from_pretrained(
_UpperCAmelCase , output_loading_info=_UpperCAmelCase , from_pt=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase_ = AutoModelForMaskedLM.from_pretrained(_UpperCAmelCase , from_tf=_UpperCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ = AutoModelForMaskedLM.from_pretrained(
_UpperCAmelCase , output_loading_info=_UpperCAmelCase , from_tf=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
@slow
def lowercase__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(_UpperCAmelCase , from_pt=_UpperCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(
_UpperCAmelCase , output_loading_info=_UpperCAmelCase , from_pt=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase_ = AutoModelForSeqaSeqLM.from_pretrained(_UpperCAmelCase , from_tf=_UpperCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ = AutoModelForSeqaSeqLM.from_pretrained(
_UpperCAmelCase , output_loading_info=_UpperCAmelCase , from_tf=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
@slow
def lowercase__ ( self : int ) -> List[str]:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
UpperCAmelCase_ = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase_ = TFAutoModelForSequenceClassification.from_pretrained(_UpperCAmelCase , from_pt=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase_ = AutoModelForSequenceClassification.from_pretrained(_UpperCAmelCase , from_tf=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
@slow
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
UpperCAmelCase_ = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase_ = TFAutoModelForQuestionAnswering.from_pretrained(_UpperCAmelCase , from_pt=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase_ = AutoModelForQuestionAnswering.from_pretrained(_UpperCAmelCase , from_tf=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
def lowercase__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = TFAutoModelWithLMHead.from_pretrained(_UpperCAmelCase , from_pt=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=_UpperCAmelCase ) , 14410 )
UpperCAmelCase_ = AutoModelWithLMHead.from_pretrained(_UpperCAmelCase , from_tf=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=_UpperCAmelCase ) , 14410 )
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = TFAutoModelWithLMHead.from_pretrained(_UpperCAmelCase , from_pt=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=_UpperCAmelCase ) , 14410 )
UpperCAmelCase_ = AutoModelWithLMHead.from_pretrained(_UpperCAmelCase , from_tf=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=_UpperCAmelCase ) , 14410 )
| 350
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''roberta'''
def __init__( self : int , _UpperCAmelCase : List[Any]=50265 , _UpperCAmelCase : str=768 , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : int=12 , _UpperCAmelCase : Tuple=3072 , _UpperCAmelCase : Dict="gelu" , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Optional[int]=512 , _UpperCAmelCase : Optional[Any]=2 , _UpperCAmelCase : Optional[int]=0.02 , _UpperCAmelCase : Optional[Any]=1e-12 , _UpperCAmelCase : Dict=1 , _UpperCAmelCase : str=0 , _UpperCAmelCase : int=2 , _UpperCAmelCase : Tuple="absolute" , _UpperCAmelCase : Any=True , _UpperCAmelCase : int=None , **_UpperCAmelCase : List[str] , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = classifier_dropout
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def lowercase__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 241
| 0
|
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
UpperCAmelCase : Dict = random.Random()
if is_torch_available():
import torch
def _A ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any=1.0 , SCREAMING_SNAKE_CASE : Union[str, Any]=None , SCREAMING_SNAKE_CASE : List[str]=None ):
"""simple docstring"""
if rng is None:
a__ : Any =global_rng
a__ : List[Any] =[]
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class __lowerCAmelCase ( unittest.TestCase):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=4_0_0 , lowerCAmelCase__=2_0_0_0 , lowerCAmelCase__=1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=1_6_0_0_0 , lowerCAmelCase__=True , lowerCAmelCase__=True , ) -> Any:
'''simple docstring'''
a__ : List[Any] =parent
a__ : Optional[int] =batch_size
a__ : Optional[Any] =min_seq_length
a__ : List[str] =max_seq_length
a__ : Any =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
a__ : Dict =feature_size
a__ : Dict =padding_value
a__ : Optional[int] =sampling_rate
a__ : Optional[int] =return_attention_mask
a__ : Optional[Any] =do_normalize
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _lowercase ( self , lowerCAmelCase__=False , lowerCAmelCase__=False ) -> Any:
'''simple docstring'''
def _flatten(lowerCAmelCase__ ):
return list(itertools.chain(*lowerCAmelCase__ ) )
if equal_length:
a__ : str =floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
a__ : Union[str, Any] =[
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
a__ : List[Any] =[np.asarray(lowerCAmelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __lowerCAmelCase ( UpperCamelCase__ , unittest.TestCase):
_lowercase : int = ASTFeatureExtractor
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ : Optional[int] =ASTFeatureExtractionTester(self )
def _lowercase ( self ) -> str:
'''simple docstring'''
a__ : str =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
a__ : str =[floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
a__ : List[Any] =[np.asarray(lowerCAmelCase__ ) for speech_input in speech_inputs]
# Test not batched input
a__ : Dict =feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
a__ : Optional[int] =feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
# Test batched
a__ : Any =feat_extract(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="np" ).input_values
a__ : str =feat_extract(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
a__ : str =[floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
a__ : Dict =np.asarray(lowerCAmelCase__ )
a__ : List[Any] =feat_extract(lowerCAmelCase__ , return_tensors="np" ).input_values
a__ : Optional[int] =feat_extract(lowerCAmelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
@require_torch
def _lowercase ( self ) -> int:
'''simple docstring'''
import torch
a__ : Union[str, Any] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a__ : Tuple =np.random.rand(1_0_0 ).astype(np.floataa )
a__ : Any =np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
a__ : int =feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
a__ : List[str] =feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def _lowercase ( self , lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
from datasets import load_dataset
a__ : int =load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
a__ : str =ds.sort("id" ).select(range(lowerCAmelCase__ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
@require_torch
def _lowercase ( self ) -> int:
'''simple docstring'''
        # fmt: off
        a__ : Optional[Any] =torch.tensor(
[-0.98_94, -1.27_76, -0.90_66, -1.27_76, -0.93_49, -1.26_09, -1.03_86, -1.27_76,
-1.15_61, -1.27_76, -1.20_52, -1.27_23, -1.21_90, -1.21_32, -1.27_76, -1.11_33,
-1.19_53, -1.13_43, -1.15_84, -1.22_03, -1.17_70, -1.24_74, -1.23_81, -1.19_36,
-0.92_70, -0.83_17, -0.80_49, -0.77_06, -0.75_65, -0.78_69] )
# fmt: on
a__ : str =self._load_datasamples(1 )
a__ : Dict =ASTFeatureExtractor()
a__ : str =feature_extractor(lowerCAmelCase__ , return_tensors="pt" ).input_values
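        # expected shape: (batch, num_frames, num_mel_bins) = (1, 1024, 128)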
        self.assertEqual(input_values.shape , (1, 1_0_2_4, 1_2_8) )
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , lowerCAmelCase__ , atol=1E-4 ) )
| 95
|
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class __lowerCAmelCase ( UpperCamelCase__):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = True , lowerCAmelCase__ = "arrow" , **lowerCAmelCase__ , ) -> Optional[int]:
'''simple docstring'''
super().__init__(
split=lowerCAmelCase__ , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ , streaming=lowerCAmelCase__ , **lowerCAmelCase__ , )
a__ : int =load_from_cache_file
a__ : Tuple =file_format
a__ : List[Any] =Spark(
df=lowerCAmelCase__ , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , working_dir=lowerCAmelCase__ , **lowerCAmelCase__ , )
def _lowercase ( self ) -> str:
'''simple docstring'''
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
a__ : str =None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=lowerCAmelCase__ , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
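# Usage sketch (identifiers above are renamed; the call names here are assumptions):
#   reader = SparkDatasetReader(spark_df, cache_dir="/tmp/cache")
#   ds = reader.read()  # streams, or downloads/prepares the Arrow cache, then loads the split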
| 95
| 1
|
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : List[str] = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class A__ ( A__ ):
A__ = 'segformer'
def __init__( self : Dict , _a : int=3 , _a : List[Any]=4 , _a : Union[str, Any]=[2, 2, 2, 2] , _a : Tuple=[8, 4, 2, 1] , _a : int=[32, 64, 160, 256] , _a : List[Any]=[7, 3, 3, 3] , _a : str=[4, 2, 2, 2] , _a : str=[1, 2, 5, 8] , _a : Union[str, Any]=[4, 4, 4, 4] , _a : List[Any]="gelu" , _a : List[str]=0.0 , _a : Optional[int]=0.0 , _a : str=0.1 , _a : Tuple=0.02 , _a : Union[str, Any]=0.1 , _a : List[str]=1e-6 , _a : Optional[int]=256 , _a : Optional[Any]=255 , **_a : Tuple , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**_a )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
' removed, as the behaviour will default to that of reshape_last_stage = True.' , _a , )
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =num_encoder_blocks
_SCREAMING_SNAKE_CASE =depths
_SCREAMING_SNAKE_CASE =sr_ratios
_SCREAMING_SNAKE_CASE =hidden_sizes
_SCREAMING_SNAKE_CASE =patch_sizes
_SCREAMING_SNAKE_CASE =strides
_SCREAMING_SNAKE_CASE =mlp_ratios
_SCREAMING_SNAKE_CASE =num_attention_heads
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =hidden_dropout_prob
_SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE =classifier_dropout_prob
_SCREAMING_SNAKE_CASE =initializer_range
_SCREAMING_SNAKE_CASE =drop_path_rate
_SCREAMING_SNAKE_CASE =layer_norm_eps
_SCREAMING_SNAKE_CASE =decoder_hidden_size
_SCREAMING_SNAKE_CASE =kwargs.get('reshape_last_stage' , _a )
_SCREAMING_SNAKE_CASE =semantic_loss_ignore_index
class A__ ( A__ ):
A__ = version.parse('1.11' )
@property
def A ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def A ( self : Any ) -> float:
'''simple docstring'''
return 1e-4
@property
def A ( self : List[str] ) -> int:
'''simple docstring'''
return 12
| 114
|
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
lowerCamelCase : List[str] = {"UserAgent": UserAgent().random}
def _lowerCAmelCase ( _UpperCamelCase : str ) -> dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =script.contents[0]
_SCREAMING_SNAKE_CASE =json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class A__ :
def __init__( self : int , _a : List[Any] ) -> Optional[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =f"https://www.instagram.com/{username}/"
_SCREAMING_SNAKE_CASE =self.get_json()
def A ( self : Optional[int] ) -> dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =requests.get(self.url , headers=_a ).text
_SCREAMING_SNAKE_CASE =BeautifulSoup(_a , 'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : str ) -> str:
'''simple docstring'''
return f"{self.__class__.__name__}('{self.username}')"
def __str__( self : Optional[int] ) -> str:
'''simple docstring'''
return f"{self.fullname} ({self.username}) is {self.biography}"
@property
def A ( self : List[Any] ) -> str:
'''simple docstring'''
return self.user_data["username"]
@property
def A ( self : str ) -> str:
'''simple docstring'''
return self.user_data["full_name"]
@property
def A ( self : Any ) -> str:
'''simple docstring'''
return self.user_data["biography"]
@property
def A ( self : Optional[Any] ) -> str:
'''simple docstring'''
return self.user_data["business_email"]
@property
def A ( self : Tuple ) -> str:
'''simple docstring'''
return self.user_data["external_url"]
@property
def A ( self : Optional[int] ) -> int:
'''simple docstring'''
return self.user_data["edge_followed_by"]["count"]
@property
def A ( self : Union[str, Any] ) -> int:
'''simple docstring'''
return self.user_data["edge_follow"]["count"]
@property
def A ( self : List[str] ) -> int:
'''simple docstring'''
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def A ( self : Union[str, Any] ) -> str:
'''simple docstring'''
return self.user_data["profile_pic_url_hd"]
@property
def A ( self : Dict ) -> bool:
'''simple docstring'''
return self.user_data["is_verified"]
@property
def A ( self : Tuple ) -> bool:
'''simple docstring'''
return self.user_data["is_private"]
def _lowerCAmelCase ( _UpperCamelCase : str = "github" ) -> None:
"""simple docstring"""
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
_SCREAMING_SNAKE_CASE =InstagramUser(_UpperCamelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , _UpperCamelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_50
assert instagram_user.number_of_followers > 12_00_00
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Optional[int] = InstagramUser("github")
print(instagram_user)
print(f'''{instagram_user.number_of_posts = }''')
print(f'''{instagram_user.number_of_followers = }''')
print(f'''{instagram_user.number_of_followings = }''')
print(f'''{instagram_user.email = }''')
print(f'''{instagram_user.website = }''')
print(f'''{instagram_user.profile_picture_url = }''')
print(f'''{instagram_user.is_verified = }''')
print(f'''{instagram_user.is_private = }''')
| 114
| 1
|
import fire

from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized model (and the matching tokenizer) built from a config."""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
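# Hedged CLI sketch (the script filename is an assumption; `fire` maps the
# function's signature onto positional args and --flags, with extra flags
# forwarded to the config as keyword overrides):
#
#   python save_randomly_initialized.py t5-small /tmp/rand-t5 --d_model=64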
| 159
|
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL


# The original class name is obfuscated in this dump; the OPENAI_CLIP_MEAN/STD
# defaults indicate a CLIP-family image processor, so that name is used here.
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
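# Hedged usage sketch via the public API this module mirrors (checkpoint-free;
# the shapes follow from the default shortest_edge=224 resize and 224x224 crop):
#
#   >>> from transformers import CLIPImageProcessor
#   >>> from PIL import Image
#   >>> processor = CLIPImageProcessor()
#   >>> processor(images=Image.new("RGB", (640, 480)), return_tensors="np")["pixel_values"].shape
#   (1, 3, 224, 224)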
| 159
| 1
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/longformer-base-4096": 4096,
    "allenai/longformer-large-4096": 4096,
    "allenai/longformer-large-4096-finetuned-triviaqa": 4096,
    "allenai/longformer-base-4096-extra.pos.embd.only": 4096,
    "allenai/longformer-large-4096-extra.pos.embd.only": 4096,
}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
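# A quick illustration of the two module-level helpers above (doctest-style
# sketch): `get_pairs` enumerates adjacent symbol pairs, i.e. the candidates for
# a BPE merge, and `bytes_to_unicode` gives every byte a printable stand-in.
#
#   >>> sorted(get_pairs(tuple("hello")))
#   [('e', 'l'), ('h', 'e'), ('l', 'l'), ('l', 'o')]
#   >>> bytes_to_unicode()[ord(" ")]  # space becomes the familiar BPE marker
#   'Ġ'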
| 141
|
import requests

giphy_api_key = "YOUR API KEY"


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of URLs of GIFs based on a given query."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]


if __name__ == "__main__":
    print("\n".join(get_gifs("space ship")))
| 141
| 1
|
import math
import os
import sys


def read_file_binary(file_path: str) -> str:
    """
    Reads given file as bytes and returns them as a long string
    """
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """
    Adds new strings (curr_string + "0", curr_string + "1") to the lexicon
    """
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """
    Compresses given data_bits using the Lempel-Ziv compression algorithm
    and returns the result as a string
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """
    Adds the given file's length in front of the compressed string
    """
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """
    Writes the given to_write string (should only consist of 0's and 1's) as bytes in the file
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    """
    Reads the source file, compresses it and writes the result to the destination file
    """
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
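# Hedged sanity sketch: `compress_data` maps an in-memory bit string to another
# bit string of lexicon codes (the exact output depends on the evolving lexicon,
# so only its alphabet is checked here):
#
#   >>> out = compress_data("01100110")
#   >>> set(out) <= {"0", "1"}
#   True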
| 76
|
from __future__ import annotations

from math import gcd


def pollard_rho(
    num: int,
    seed: int = 2,
    step: int = 1,
    attempts: int = 3,
) -> int | None:
    # A value less than 2 can cause an infinite loop in the algorithm.
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's Rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be random in the
    # sense that the output value is cryptographically secure or difficult
    # to calculate, it only needs to be random in the sense that all output
    # values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
    # However, the success of Pollard's algorithm isn't guaranteed and is
    # determined in part by the initial seed and the chosen random function.
    # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
    # where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)

            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.

        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's the "optimized" variant does.
        seed = hare

        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "num",
        type=int,
        help="The value to find a divisor of",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=3,
        help="The number of attempts before giving up",
    )
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"{args.num} is probably prime")
    else:
        quotient = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
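# Hedged sanity sketch (doctest-style): the even-number shortcut is
# deterministic; for odd composites the factor found depends on the seed/step
# schedule, so only divisibility is guaranteed when a factor is returned.
#
#   >>> pollard_rho(100)          # even inputs short-circuit to 2
#   2
#   >>> 8051 % pollard_rho(8051)  # 8051 = 83 * 97, the textbook rho example
#   0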
| 225
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}


class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 364
|
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}


class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 19
| 0
|
import unittest

import numpy as np

from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inpainting(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inpainting_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 104
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Union[str, Any] = ["""flax"""]
def __init__( self : Dict , *a_ : Optional[Any] , **a_ : List[str] ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Optional[Any] , *a_ : Union[str, Any] , **a_ : Optional[Any] ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : int , *a_ : Union[str, Any] , **a_ : Any ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : List[Any] = ["""flax"""]
def __init__( self : Dict , *a_ : Optional[Any] , **a_ : Optional[Any] ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : str , *a_ : Union[str, Any] , **a_ : List[Any] ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : List[Any] , *a_ : Optional[Any] , **a_ : List[Any] ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Dict = ["""flax"""]
def __init__( self : Any , *a_ : Optional[int] , **a_ : str ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Dict , *a_ : Tuple , **a_ : Dict ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Union[str, Any] , *a_ : Any , **a_ : Union[str, Any] ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Optional[Any] = ["""flax"""]
def __init__( self : str , *a_ : Optional[int] , **a_ : Optional[Any] ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Dict , *a_ : Dict , **a_ : str ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : List[Any] , *a_ : Optional[int] , **a_ : List[str] ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Optional[Any] = ["""flax"""]
def __init__( self : Optional[Any] , *a_ : Optional[Any] , **a_ : Optional[Any] ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : str , *a_ : Optional[Any] , **a_ : List[Any] ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : List[str] , *a_ : Union[str, Any] , **a_ : List[Any] ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : List[Any] = ["""flax"""]
def __init__( self : Union[str, Any] , *a_ : Dict , **a_ : Any ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Tuple , *a_ : Optional[Any] , **a_ : Tuple ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Optional[int] , *a_ : List[Any] , **a_ : Any ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : List[str] = ["""flax"""]
def __init__( self : Union[str, Any] , *a_ : str , **a_ : Any ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Optional[Any] , *a_ : Any , **a_ : Tuple ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Optional[Any] , *a_ : Optional[int] , **a_ : str ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : int = ["""flax"""]
def __init__( self : Dict , *a_ : str , **a_ : int ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : str , *a_ : List[Any] , **a_ : List[Any] ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : List[Any] , *a_ : List[Any] , **a_ : List[Any] ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Tuple = ["""flax"""]
def __init__( self : Any , *a_ : Any , **a_ : int ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Dict , *a_ : Tuple , **a_ : Optional[int] ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : List[Any] , *a_ : Dict , **a_ : Dict ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Any = ["""flax"""]
def __init__( self : Union[str, Any] , *a_ : Any , **a_ : List[Any] ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Dict , *a_ : List[Any] , **a_ : Optional[int] ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : int , *a_ : List[Any] , **a_ : Tuple ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Tuple = ["""flax"""]
def __init__( self : Tuple , *a_ : Optional[int] , **a_ : Union[str, Any] ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : int , *a_ : List[str] , **a_ : Optional[Any] ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Union[str, Any] , *a_ : Any , **a_ : Any ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : List[str] = ["""flax"""]
def __init__( self : Optional[Any] , *a_ : Optional[Any] , **a_ : Dict ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : List[str] , *a_ : int , **a_ : List[str] ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : List[str] , *a_ : int , **a_ : str ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Any = ["""flax"""]
def __init__( self : List[str] , *a_ : Optional[Any] , **a_ : List[Any] ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : int , *a_ : Optional[int] , **a_ : Dict ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : List[str] , *a_ : Union[str, Any] , **a_ : Union[str, Any] ):
requires_backends(cls , ["flax"] )
| 241
| 0
|
from __future__ import annotations

from collections.abc import Generator


def sieve() -> Generator[int, None, None]:
    """
    Incremental sieve of Eratosthenes: lazily yields primes, storing only one
    pending multiple per known prime.
    """
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
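# Sanity sketch: spot-check the incremental sieve before trusting solution().
if __name__ == "__main__":
    gen = sieve()
    assert [next(gen) for _ in range(5)] == [2, 3, 5, 7, 11]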
| 369
|
import math
import os
from copy import deepcopy

import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer

from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed


# Silences advisory warnings from transformers during the distributed tests.
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
            preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
            preds = outputs.logits.argmax(dim=-1)
            references = batch["labels"]
            preds, references = accelerator.gather_for_metrics((preds, references))
            metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 113
| 0
|
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
a : Union[str, Any] = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class a ( nn.Module ):
"""simple docstring"""
def __init__( self : int , __lowercase : Optional[int] ) -> Optional[Any]:
super().__init__()
__UpperCAmelCase : List[str] = torchvision.models.resnetaaa(pretrained=__lowercase )
__UpperCAmelCase : str = list(model.children() )[:-2]
__UpperCAmelCase : Optional[Any] = nn.Sequential(*__lowercase )
__UpperCAmelCase : List[str] = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def UpperCAmelCase ( self : Optional[Any] , __lowercase : Optional[int] ) -> str:
# Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
__UpperCAmelCase : int = self.pool(self.model(__lowercase ) )
__UpperCAmelCase : int = torch.flatten(__lowercase , start_dim=2 )
__UpperCAmelCase : List[Any] = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class a ( lowercase__ ):
"""simple docstring"""
def __init__( self : Tuple , __lowercase : List[str] , __lowercase : Tuple , __lowercase : Union[str, Any] , __lowercase : str , __lowercase : Dict ) -> str:
__UpperCAmelCase : int = [json.loads(__lowercase ) for l in open(__lowercase )]
__UpperCAmelCase : Tuple = os.path.dirname(__lowercase )
__UpperCAmelCase : Optional[Any] = tokenizer
__UpperCAmelCase : Optional[Any] = labels
__UpperCAmelCase : Any = len(__lowercase )
__UpperCAmelCase : Dict = max_seq_length
__UpperCAmelCase : Union[str, Any] = transforms
def __len__( self : Optional[int] ) -> Dict:
return len(self.data )
    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }
    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
    return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
    return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4_6_7_7_7_0_4_4, 0.4_4_5_3_1_4_2_9, 0.4_0_6_6_1_0_1_7] , std=[0.1_2_2_2_1_9_9_4, 0.1_2_1_4_5_8_3_5, 0.1_4_3_8_0_4_6_9] , ),
] )
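# Added sanity check (not in the original script): the composed transforms above map
# any RGB image to a normalized 3x224x224 tensor, the shape the ResNet-152 encoder expects.
if __name__ == "__main__":
    dummy = Image.new("RGB", (640, 480))
    tensor = get_image_transforms()(dummy)
    assert tuple(tensor.shape) == (3, 224, 224)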
| 114
|
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
if __name__ == "__main__":
def benchmark() -> None:
from timeit import timeit
print("""Without Numpy""" )
print(
timeit(
"""euclidean_distance_no_np([1, 2, 3], [4, 5, 6])""" , number=10000 , globals=globals() , ) )
print("""With Numpy""" )
print(
timeit(
"""euclidean_distance([1, 2, 3], [4, 5, 6])""" , number=10000 , globals=globals() , ) )
benchmark()
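# Added worked example: the distance between (1, 2, 3) and (4, 5, 6) is
# sqrt(3^2 + 3^2 + 3^2) = sqrt(27) ~ 5.196, and both implementations agree.
assert abs(euclidean_distance([1, 2, 3], [4, 5, 6]) - 27**0.5) < 1e-9
assert abs(euclidean_distance_no_np([1, 2, 3], [4, 5, 6]) - 27**0.5) < 1e-9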
| 114
| 1
|
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
"""simple docstring"""
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def test_non_mockfs():
"""simple docstring"""
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()
def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
| 355
|
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)
    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = """It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty collection of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])
    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)
    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 7
| 0
|
'''simple docstring'''
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int) -> None:
    """Recursively sorts the first `n` elements of `collection` in place."""
    # checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    """Inserts the element at `index - 1` into its sorted position to the right."""
    # checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)
if __name__ == "__main__":
numbers = input("Enter integers separated by spaces: ")
number_list = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
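# Added deterministic check (the prompt above needs interactive input; this one does not):
# the recursion bubbles each element leftward into place, e.g. [5, 3, 1, 4, 2] -> [1, 2, 3, 4, 5].
_sample = [5, 3, 1, 4, 2]
rec_insertion_sort(_sample, len(_sample))
assert _sample == [1, 2, 3, 4, 5]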
| 141
|
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
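# Added usage sketch (not in the original file): with the default conv strides
# (5, 2, 2, 2, 2, 2, 2) the feature extractor downsamples by 5 * 2**6 = 320,
# which is exactly what `inputs_to_logits_ratio` reports.
if __name__ == "__main__":
    config = Data2VecAudioConfig()
    assert config.inputs_to_logits_ratio == 320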
| 141
| 1
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in the original FLAVA checkpoint
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
__UpperCAmelCase = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
_import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_deit"] = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_deit"] = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 1
| 1
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization. "
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization. "
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )
def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
main()
| 168
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_bloom"] = [
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 19
| 0
|
"""simple docstring"""
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )
def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)
class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)
    def run(self):
        nlp, outputs = self._nlp, []

        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
| 38
|
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer for MGP-STR scene-text recognition."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}
    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
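# Added usage sketch (not part of the original module): the tokenizer splits text into
# single characters. The toy vocabulary below is a stand-in, not the real MGP-STR vocab.json.
if __name__ == "__main__":
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False, encoding="utf-8") as tmp:
        json.dump({"[GO]": 0, "[s]": 1, "a": 2, "b": 3}, tmp)
    tokenizer = MgpstrTokenizer(vocab_file=tmp.name)
    assert tokenizer._tokenize("ab") == ["a", "b"]
    assert tokenizer._convert_token_to_id("a") == 2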
| 38
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
SCREAMING_SNAKE_CASE :List[str] = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_model_names = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"funnel-transformer/{name}": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        bos_token="<s>",
        eos_token="</s>",
        clean_text=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        wordpieces_prefix="##",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 159
|
"""simple docstring"""
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # gradient of the normal to the ellipse at the point of incidence
    normal_gradient = point_y / 4 / point_x
    # sa and ca are sin(2 * theta) and cos(2 * theta) for the reflection rotation
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_minus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient
def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections
if __name__ == "__main__":
print(f'''{solution() = }''')
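# Added sanity note: with the default starting beam (1.4, -9.6), the published
# Project Euler 144 answer is 354 reflections, i.e. solution() should return 354.
# assert solution(1.4, -9.6) == 354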
| 113
| 0
|
import os
def solution(filename: str = "input.txt") -> int:
    """
    Returns the minimal path sum in the matrix from `filename`, moving up, down,
    and right through the matrix from the left column to the right column.
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")] for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        for i in range(rows):
            # moving right is always allowed
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1, rows):
            # moving down may be cheaper
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )
        for i in range(rows - 2, -1, -1):
            # moving up may be cheaper
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
print(f"{solution() = }")
| 368
|
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length
    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # 3, 0, 1)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std
    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)
    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes
def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
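# Added toy check for the clipping helper above (assuming the restored _clip_box name):
# boxes are (x1, y1, x2, y2) and box_size is (height, width).
if __name__ == "__main__":
    example_boxes = torch.tensor([[-5.0, 10.0, 900.0, 700.0]])
    _clip_box(example_boxes, (600, 800))
    assert example_boxes.tolist() == [[0.0, 10.0, 800.0, 600.0]]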
| 191
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
'in_channels': 8,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "A robot, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
| 50
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    # Attend-and-Excite requires deterministic algorithms for reproducible slices
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0) -> dict:
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'a cat and a frog',
            'token_indices': [2, 5],
            'generator': generator,
            'num_inference_steps': 1,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
            'max_iter_to_alter': 2,
            'thresholds': {0: 0.7},
        }
        return inputs
    def test_inference(self) -> None:
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_cpu_offload_forward_pass(self) -> None:
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self) -> None:
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self) -> None:
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self) -> None:
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self) -> None:
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self) -> None:
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self) -> None:
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls) -> None:
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls) -> None:
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self) -> None:
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self) -> None:
        generator = torch.manual_seed(51)
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', safety_checker=None, torch_dtype=torch.float16)
        pipe.to('cuda')
        prompt = 'a painting of an elephant with glasses'
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator,
            num_inference_steps=5, max_iter_to_alter=5, output_type='numpy',
        ).images[0]
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy')
        assert np.abs((expected_image - image).max()) < 5e-1
'''simple docstring'''
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = 'encoder-decoder'
    is_composition = True

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('encoder')
        encoder_model_type = encoder_config.pop('model_type')
        decoder_config = kwargs.pop('decoder')
        decoder_model_type = decoder_config.pop('model_type')

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config')
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self) -> dict:
        output = copy.deepcopy(self.__dict__)
        output['encoder'] = self.encoder.to_dict()
        output['decoder'] = self.decoder.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
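# Minimal usage sketch (model names are illustrative, not prescribed by this
# file): compose two pretrained configs into a single encoder-decoder config.
#
#   from transformers import AutoConfig, EncoderDecoderConfig
#   enc = AutoConfig.from_pretrained("bert-base-uncased")
#   dec = AutoConfig.from_pretrained("gpt2")
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)
#   assert config.decoder.is_decoder  # set by from_encoder_decoder_configs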
| 55
|
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Entropy of the softmax distribution over a pre-softmax logit tensor x."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
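# Why this equals the softmax entropy (derivation added for reference): with
# p_i = exp(x_i) / A, we get
#   H = -sum_i p_i * log(p_i) = -sum_i p_i * (x_i - log(A)) = log(A) - B / A.
# Quick numeric check: entropy(torch.tensor([[1.0, 2.0, 3.0]])) is ~0.832,
# matching -sum(p * log(p)) for p = softmax([1, 2, 3]).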
class DeeBertEncoder(nn.Module):
    def __init__(self, config) -> None:
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x) -> None:
        if isinstance(x, (float, int)):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler) -> None:
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask)
            hidden_states = layer_outputs[0]
            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)
            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output
            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)
                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)
        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    'The Bert Model transformer with early exiting (DeeBERT). ',
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config) -> None:
        super().__init__(config)
        self.config = config
        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)
        self.init_weights()

    def init_highway_pooler(self) -> None:
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value) -> None:
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune) -> None:
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds')

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype)  # fp16 compatibility
        # additive mask: 0.0 for positions to attend to, -10000.0 for masked positions
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)
        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[1:]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer) -> None:
        self.message = message  # the early-exit outputs tuple
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    """A per-layer early-exit head: BertPooler + dropout + linear classifier."""

    def __init__(self, config) -> None:
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output
        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output
        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config) -> None:
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                # exclude the final highway, of course
                outputs = (sum(highway_losses[:-1]),) + outputs
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
'''simple docstring'''
import argparse
import os

import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint


def count_parameters(state_dict):
    # encoder.embeddings are double copied in the original FLAVA checkpoint
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
    parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()

    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
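# Example invocation (all paths and the script name are hypothetical):
#   python convert_flava_original_pytorch_to_hf.py \
#       --checkpoint_path ./flava_full.pt \
#       --codebook_path ./flava_codebook.pt \
#       --pytorch_dump_folder_path ./flava-hf
# --checkpoint_path may also be a URL, in which case the weights are fetched
# via torch.hub.load_state_dict_from_url (see convert_flava_checkpoint above).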
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self) -> None:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=7, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32, attention_head_dim=8, use_linear_projection=True,
            only_cross_attention=(True, True, False), num_class_embeds=100,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu",
            projection_dim=512,
        )
        return CLIPTextModel(config)
    def test_stable_diffusion_upscale(self) -> None:
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae,
            text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20,
            num_inference_steps=2, output_type="np",
        )
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20,
            num_inference_steps=2, output_type="np", return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self) -> None:
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae,
            text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0, noise_level=20,
            num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2,
            guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self) -> None:
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae,
            text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_inference_steps=2, output_type="np",
        ).images
        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self) -> None:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self) -> None:
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy")
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16(self) -> None:
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy")
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self) -> None:
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png")
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)


def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings
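# For reference, a warning block in `warnings.txt` that the parser above keeps
# looks roughly like the following (hypothetical path and message); the
# f": {x}: " check is what matches the target warning category:
#
#   tests/test_modeling_foo.py::test_bar
#     /src/transformers/foo.py:123: DeprecationWarning: `bar` is deprecated
#       warnings.warn("`bar` is deprecated", DeprecationWarning)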
def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
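# Example invocation (run id, token, and script name are hypothetical):
#   python extract_warnings.py \
#       --workflow_run_id 1234567890 \
#       --output_dir ./artifacts \
#       --token ghp_xxx \
#       --targets DeprecationWarning,UserWarning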
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ) -> None:
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self) -> None:
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self) -> None:
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self) -> None:
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self) -> None:
        pass
    def test_call_pil(self) -> None:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self) -> None:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self) -> None:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost
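# Worked example for calculate_heuristic (added for reference): from
# (pos_x=3, pos_y=4) toward goal (0, 0), dx = 3 and dy = 4, so HEURISTIC == 1
# gives the Manhattan estimate |3| + |4| = 7 while the default Euclidean branch
# gives sqrt(3**2 + 4**2) = 5. Both are admissible on this 4-connected grid;
# Manhattan is the tighter bound for 4-directional movement.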
class AStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        # no path found; fall back to the start position
        return [self.start.pos]
    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            # each search greedily re-targets the other frontier's current node
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        # no meeting point found; fall back to the start position
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
UpperCAmelCase_ : Optional[int] = (0, 0)
UpperCAmelCase_ : str = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_ : Any = AStar(init, goal)
UpperCAmelCase_ : str = a_star.search()
UpperCAmelCase_ : Optional[int] = time.time() - start_time
print(F'''AStar execution time = {end_time:f} seconds''')
UpperCAmelCase_ : Union[str, Any] = time.time()
UpperCAmelCase_ : List[str] = BidirectionalAStar(init, goal)
UpperCAmelCase_ : Union[str, Any] = time.time() - bd_start_time
print(F'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
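# Usage note: both AStar.search and BidirectionalAStar.search return the path
# as a list of (y, x) tuples from start to goal (see the retrace methods above)
# and fall back to a single-element list containing the start position when no
# path exists, so callers can always index the first element safely.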
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
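# Behavior sketch (assuming the standard transformers _LazyModule): replacing
# the module in sys.modules defers the heavy submodule imports until first
# attribute access, so for example
#
#   from transformers.models.opt import OPTConfig  # imports configuration_opt only
#
# pays the torch/tf/flax import cost only for the backend actually requested.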
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return the product u(u-1)(u-2)...(u-p+1) used by Newton's forward formula."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
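# Quick check (added for reference):
#   ucal(1.5, 3) = 1.5 * (1.5 - 1) * (1.5 - 2) = 1.5 * 0.5 * (-0.5) = -0.375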
def main() -> None:
    n = int(input('enter the numbers of values: '))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(0)
    print('enter the values of parameters in a list: ')
    x = list(map(int, input().split()))
    print('enter the values of corresponding parameters: ')
    for i in range(n):
        y[i][0] = float(input())
    value = int(input('enter the value to interpolate: '))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
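# The accumulation loop in main() evaluates Newton's forward-difference
# formula, restated here for reference:
#   f(x0 + u*h) ~= y0 + u*dy0 + u(u-1)/2! * d2y0 + u(u-1)(u-2)/3! * d3y0 + ...
# where y[0][i] holds the i-th forward difference of y0 from the table and
# ucal(u, i) supplies the u(u-1)...(u-i+1) factor.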
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'
),
}
class DPRConfig(PretrainedConfig):
    model_type = 'dpr'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type='absolute',
        projection_dim: int = 0,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
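# Minimal usage sketch (values are illustrative): projection_dim=0, the default
# above, means the encoder's pooled output is used as-is; a non-zero value is
# understood upstream as the size of an extra projection layer.
#
#   config = DPRConfig(projection_dim=128)
#   assert config.hidden_size == 768  # BERT-base defaults above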
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class XmodConfig(PretrainedConfig):
    model_type = 'xmod'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type='absolute',
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=('en_XX',),
        default_language=None,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
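# Illustrative usage sketch (added for clarity; not part of the original file).
# This module uses package-relative imports, so in practice the class is reached
# through the released `transformers` package:
#
#     from transformers import XmodConfig
#
#     config = XmodConfig(num_hidden_layers=6, languages=("en_XX", "de_DE"))
#     config.languages                 # ['en_XX', 'de_DE']
#     config.adapter_reduction_factor  # 2 (the default)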
| 191
| 0
|
"""
Project Euler Problem 101: https://projecteuler.net/problem=101

Fit polynomials through the first k terms of a sequence (the "optimum
polynomial" of degree k - 1) and sum the first incorrect terms (FITs)
each fit produces for the generating function defined below.
"""
from __future__ import annotations

from collections.abc import Callable

Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the simultaneous equations matrix * x = vector by Gaussian elimination."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # copy the coefficient matrix and the target vector into one augmented matrix
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting: pick the row with the largest absolute value in this column
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
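# Worked example (added for clarity; not part of the original file): the system
#     2x + y = 5
#     x  - y = 1
# has the solution x = 2, y = 1, so
#     solve([[2, 1], [1, -1]], [[5], [1]]) == [[2.0], [1.0]]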
def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Given data points (1, y0), (2, y1), ..., return a function evaluating the
    interpolating polynomial through them."""
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating function u(n) from the problem statement."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms (FITs) produced by each optimum polynomial."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]

    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]

    ret: int = 0
    poly: Callable[[int], int]
    x_val: int

    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1

        ret += poly(x_val)

    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
| 355
|
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(self, max_length: int, vocab_size: int, d_model: int, dropout_rate: float, num_layers: int, num_heads: int, d_kv: int, d_ff: int, feed_forward_proj: str, is_decoder: bool = False):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size, d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff, dropout_rate=dropout_rate, feed_forward_proj=feed_forward_proj, is_decoder=is_decoder, is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
| 251
| 0
|
'''simple docstring'''
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in the input strings, increment the count for the
    # first string and decrement it for the second
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
print(f'''{input_a} and {input_b} are {'' if status else 'not '}anagrams.''')
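# Illustrative check (added for clarity; not part of the original file): the
# per-character tally above is equivalent to comparing collections.Counter
# objects built from the normalized strings.
if __name__ == "__main__":
    from collections import Counter

    assert check_anagrams("Silent", "Listen") is True
    assert check_anagrams("hello", "world") is False
    assert check_anagrams("dormitory", "dirty room") == (
        Counter("dormitory") == Counter("dirtyroom")
    )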
| 55
|
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 55
| 1
|
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
_DESCRIPTION = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
_KWARGS_DESCRIPTION = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n    predictions: list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references: list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    max_order: Maximum n-gram order to use when computing BLEU score.\n    smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n    'bleu': bleu score,\n    'precisions': geometric mean of n-gram precisions,\n    'brevity_penalty': brevity penalty,\n    'length_ratio': ratio of lengths,\n    'translation_length': translation_length,\n    'reference_length': reference_length\nExamples:\n\n    >>> predictions = [\n    ...     [\"hello\", \"there\", \"general\", \"kenobi\"],  # tokenized prediction of the first sample\n    ...     [\"foo\", \"bar\", \"foobar\"]  # tokenized prediction of the second sample\n    ... ]\n    >>> references = [\n    ...     [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]],  # tokenized references for the first sample (2 references)\n    ...     [[\"foo\", \"bar\", \"foobar\"]]  # tokenized references for the second sample (1 reference)\n    ... ]\n    >>> bleu = datasets.load_metric(\"bleu\")\n    >>> results = bleu.compute(predictions=predictions, references=references)\n    >>> print(results[\"bleu\"])\n    1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
| 351
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
    705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
    1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
    4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
    11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
    17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
    34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
    893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
    3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
    7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
    14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
    22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
    42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=51865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4, decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1536, encoder_ffn_dim=1536, encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50257, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False, max_source_positions=1500, max_target_positions=448, pad_token_id=50256, bos_token_id=50256, eos_token_id=50256, suppress_tokens=None, begin_suppress_tokens=[220, 50256], use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        self.median_filter_width = median_filter_width

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, **kwargs)
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, sampling_rate: int = 22050, time_duration: float = 5.0, frequency: int = 220) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size, framework=framework, sampling_rate=sampling_rate, time_duration=time_duration, frequency=frequency
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
| 348
| 0
|
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
_DESCRIPTION = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n        Estimated target values.\n    references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n        Ground truth (correct) target values.\n    sample_weight: array-like of shape (n_samples,), default=None\n        Sample weights.\n    multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n        Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n        \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n        \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n    squared : bool, default=True\n        If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n    mse : mean squared error.\nExamples:\n\n    >>> mse_metric = datasets.load_metric(\"mse\")\n    >>> predictions = [2.5, 0.0, 2, 8]\n    >>> references = [3, -0.5, 2, 7]\n    >>> results = mse_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'mse': 0.375}\n    >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n    >>> print(rmse_result)\n    {'mse': 0.6123724356957945}\n\n    If you're using multi-dimensional lists, then set the config as follows :\n\n    >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n    >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n    >>> references = [[0, 2], [-1, 2], [8, -5]]\n    >>> results = mse_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'mse': 0.7083333333333334}\n    >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )

        return {"mse": mse}
| 139
|
'''simple docstring'''
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float) -> tuple[str, float]:
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif stress < 0:
raise ValueError("Stress cannot be negative" )
elif tangential_force < 0:
raise ValueError("Tangential Force cannot be negative" )
elif area < 0:
raise ValueError("Area cannot be negative" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
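# Illustrative usage (added for clarity; not part of the original file): pass
# exactly one zero and the function solves for that quantity from the other two.
if __name__ == "__main__":
    assert shear_stress(stress=25, tangential_force=100, area=0) == ("area", 4.0)
    assert shear_stress(stress=0, tangential_force=1600, area=200) == ("stress", 8.0)
    assert shear_stress(stress=1000, tangential_force=0, area=1200) == ("tangential_force", 1200000)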
| 139
| 1
|
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name: str, num_meta4D_last_stage: int) -> str:
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)

            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 275
|
'''simple docstring'''
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")

    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
print(F'{input_str} is {"an" if isogram else "not an"} isogram.')
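# Illustrative check (added for clarity; not part of the original file): an
# isogram contains no repeated letters, case-insensitively.
if __name__ == "__main__":
    assert is_isogram("Uncopyrightable") is True
    assert is_isogram("letter") is False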
| 275
| 1
|
'''simple docstring'''
def xnor_gate(input_1: int, input_2: int) -> int:
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
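# Equivalent formulation (added note; not part of the original file): for 0/1
# inputs, XNOR is the complement of XOR, i.e. xnor_gate(a, b) == 1 - (a ^ b).
if __name__ == "__main__":
    assert all(xnor_gate(a, b) == 1 - (a ^ b) for a in (0, 1) for b in (0, 1))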
| 145
|
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size, max_position_embeddings=args.max_target_positions, num_layers=args.decoder_layers, attention_heads=args.decoder_attention_heads, ffn_dim=args.decoder_ffn_embed_dim, d_model=args.decoder_embed_dim, layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="gelu", scale_embedding=not args.no_scale_embedding, tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 88
| 0
|
"""simple docstring"""
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, """wb""") as fp:
fp.write(download_video(url))
print(F"""Done. Video saved to disk as {file_name}.""")
| 366
|
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)


LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""
class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for processing logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers applied during generation with multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for warping logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class FlaxLogitsProcessorList(list):
    """A list of FlaxLogitsProcessor / FlaxLogitsWarper objects, applied to the scores in order."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    """Logits warper for temperature (exponential scaling of the output probability distribution)."""

    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
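# Illustrative effect (added note; not part of the original file): dividing the
# logits by a temperature below 1.0 sharpens the softmax distribution, while a
# temperature above 1.0 flattens it. For example:
#
#     import jax
#     import jax.numpy as jnp
#
#     logits = jnp.array([1.0, 2.0])
#     jax.nn.softmax(logits / 0.5)  # more mass on the larger logit
#     jax.nn.softmax(logits / 2.0)  # closer to uniform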
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    """Logits warper that keeps the smallest set of tokens whose cumulative probability exceeds top_p."""

    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    """Logits warper that keeps only the top-k highest probability tokens."""

    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor that forces the specified token as the first generated token."""

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores
class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor that forces the specified token as the last token when max_length is reached."""

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor enforcing a minimum length by setting the EOS probability to 0."""

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")

        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)

        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor that suppresses a list of tokens as soon as generation starts (at begin_index)."""

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)

        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores
class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor that suppresses a list of tokens at every decoding step."""

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor that forces specific tokens at specific generation indices."""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If cur_len is beyond the forced-token array, leave the scores untouched
            lambda: scores,
            # Otherwise force the mapped token, if one is set for this position
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                lambda: _force_token(cur_len),
                lambda: scores,
            ),
        )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor that modifies the scores for timestamp prediction in Whisper."""

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1

        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index

        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
| 150
| 0
|
'''simple docstring'''
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar("_T")


class QueueByTwoStacks(Generic[_T]):
    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        # To reduce the number of attribute look-ups in the `while` loop.
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append

        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())

        if not self._stack2:
            raise IndexError("Queue is empty")

        return self._stack2.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
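# Illustrative usage (added for clarity; not part of the original file): FIFO
# order is preserved even though items pass through two LIFO stacks, and each
# item is moved between stacks at most once, so `get` is amortized O(1).
if __name__ == "__main__":
    q = QueueByTwoStacks([1, 2, 3])
    q.put(4)
    assert [q.get(), q.get(), q.get(), q.get()] == [1, 2, 3, 4]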
| 251
|
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
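# Illustrative example (added for clarity; not part of the original file): for
# batch dims (2, 3), flat index 4 corresponds to the multi-index (1, 1), i.e.
# _flat_idx_to_idx(4, (2, 3)) == (1, 1), the inverse of row-major flattening.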
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    """Equivalent to t.reshape((-1,) + t.shape[no_batch_dims:])[flat_start:flat_end], without the reshape."""
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims)

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(layer: Callable, inputs: Dict[str, Any], chunk_size: int, no_batch_dims: int, low_mem: bool = False, _out: Any = None, _add_into_out: bool = False) -> Any:
    """Run `layer` over `inputs` in chunks of `chunk_size` along the flattened batch dimensions."""
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
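# A minimal usage sketch; the toy layer and tensor shapes below are
# assumptions for illustration, not part of this module. The result is
# identical to calling the layer on the full batch at once:
#
#   layer = lambda x: {"out": x * 2}            # any callable taking keyword tensors
#   inputs = {"x": torch.randn(4, 8, 16)}
#   out = chunk_layer(layer, inputs, chunk_size=8, no_batch_dims=2)
#   # out["out"].shape == (4, 8, 16), equal to layer(**inputs)["out"]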
class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
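# A sketch of how the tuner might be driven; the wrapper function and tensor
# shapes here are assumptions, not part of this module. The tuner binary
# searches the largest chunk size that does not raise a RuntimeError (OOM):
#
#   tuner = ChunkSizeTuner(max_chunk_size=512)
#   fn = lambda x, chunk_size: chunk_layer(lambda x: {"o": x + 1}, {"x": x}, chunk_size, 1)
#   best = tuner.tune_chunk_size(fn, (torch.randn(64, 32),), min_chunk_size=1)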
| 251
| 1
|
'''simple docstring'''
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )
def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
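# A quick sanity check, assuming agreement with math.sin/math.cos to within
# 1e-9 is acceptable for the default 30-term expansion:
if __name__ == "__main__":
    from math import cos, sin

    assert abs(maclaurin_sin(1.0) - sin(1.0)) < 1e-9
    assert abs(maclaurin_cos(1.0) - cos(1.0)) < 1e-9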
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
| 160
|
'''simple docstring'''
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
)
waitKey(0)
destroyAllWindows()
| 160
| 1
|
'''simple docstring'''
import string
def decrypt(message: str) -> None:
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"""Decryption using Key #{key}: {translated}""")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
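# A small demonstration, assuming a ROT13-style ciphertext; `decrypt` prints
# all 26 candidate keys, and the plaintext surfaces at key #13:
#
#   decrypt("GUVF VF N FRPERG ZRFFNTR")
#   # ... Decryption using Key #13: THIS IS A SECRET MESSAGE ...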
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 89
|
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(self, vqvae: VQModel, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, transformer: Transformer2DModel, scheduler: VQDiffusionScheduler, learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
@torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], num_inference_steps: int = 100, guidance_scale: float = 5.0, truncation_rate: float = 1.0, num_images_per_prompt: int = 1, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """Zero out (set to log(0)) the lowest probabilities so that each column keeps at most `truncation_rate` cumulative mass."""
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
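# A minimal sketch of the truncation behaviour on a toy distribution; the
# shapes below (batch=1, classes=3, pixels=1) and the `pipe` instance are
# assumptions for illustration:
#
#   log_p = torch.log(torch.tensor([[[0.7], [0.2], [0.1]]]))
#   truncated = pipe.truncate(log_p, truncation_rate=0.75)
#   # sorted cumulative mass is 0.7, 0.9, 1.0, so only the smallest class
#   # (p=0.1) is pushed to log(0); the top two survive.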
| 348
| 0
|
import pprint
import requests
UpperCamelCase = """https://zenquotes.io/api"""
def _SCREAMING_SNAKE_CASE ( ):
return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def _SCREAMING_SNAKE_CASE ( ):
return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
| 65
|
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"""
""" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"""
""" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.""",
"""The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"""
""" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"""
""" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"""
""" body.""",
"""Amnesty International releases its annual report on the death penalty. The report catalogs the use of"""
""" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"""
""" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"""
""" punishment.""",
]
TGT = [
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."""
""" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"""
""" had informed his Lufthansa training school of an episode of severe depression, airline says .""",
"""Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."""
""" Israel and the United States opposed the move, which could open the door to war crimes investigations against"""
""" Israelis .""",
"""Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"""
""" death . Organization claims that governments around the world are using the threat of terrorism to advance"""
""" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"""
""" sentences up by 28% .""",
]
def test_disaggregated_scores_are_determinstic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_sentence_level_rouge():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)


def test_pegasus_newline():
    pred = [
        """" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" """
    ]
    tgt = [
        """ Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
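# A minimal sketch of direct usage, assuming the local `utils.calculate_rouge`
# helper imported at the top of this file:
#
#   scores = calculate_rouge(PRED, TGT, rouge_keys=["rouge1", "rougeLsum"])
#   print(scores)  # aggregated f-measures per ROUGE key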
| 65
| 1
|
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )


def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)
class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
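# A sketch of a typical invocation through the transformers CLI; the task and
# checkpoint names below are assumptions for illustration:
#
#   transformers-cli run --task text-classification \
#       --model distilbert-base-uncased-finetuned-sst-2-english \
#       --input data.csv --output out.json --format infer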
| 275
|
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item by counting the characters in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of the child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str]) -> list[str]:
    """Select a second parent and generate new children from the pair."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
| 275
| 1
|
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    """Wraps several ControlNets so they can be driven as a single model; their residuals are summed."""

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]) -> None:
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(self, sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, controlnet_cond: List[torch.Tensor], conditioning_scale: List[float], class_labels: Optional[torch.Tensor] = None, timestep_cond: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guess_mode: bool = False, return_dict: bool = True) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample=sample,
                timestep=timestep,
                encoder_hidden_states=encoder_hidden_states,
                controlnet_cond=image,
                conditioning_scale=scale,
                class_labels=class_labels,
                timestep_cond=timestep_cond,
                attention_mask=attention_mask,
                cross_attention_kwargs=cross_attention_kwargs,
                guess_mode=guess_mode,
                return_dict=return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(self, save_directory: Union[str, os.PathLike], is_main_process: bool = True, save_function: Callable = None, safe_serialization: bool = False, variant: Optional[str] = None):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
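# A sketch of the save/load round trip implemented above; the directory name
# and the two source ControlNets are assumptions for illustration:
#
#   multi = MultiControlNetModel([controlnet_a, controlnet_b])
#   multi.save_pretrained("./multi_controlnet")
#   # writes ./multi_controlnet and ./multi_controlnet_1
#   restored = MultiControlNetModel.from_pretrained("./multi_controlnet")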
| 356
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
| 81
| 0
|
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(self, vocab_size=21128, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, max_relative_position=64, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout=0.1, pad_token_id=0, bos_token_id=2, eos_token_id=3, use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
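# A minimal instantiation sketch; the keyword values shown are simply the
# defaults declared above:
#
#   config = NezhaConfig(vocab_size=21128, hidden_size=768)
#   print(config.max_relative_position)  # 64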
| 101
|
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def lowerCAmelCase__ ( _UpperCamelCase : Any="ro" , _UpperCamelCase : Optional[Any]="en" , _UpperCamelCase : Any="wmt16" , _UpperCamelCase : Tuple=None ) -> None:
"""simple docstring"""
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('run pip install datasets' )
snake_case = f"""{src_lang}-{tgt_lang}"""
print(f"""Converting {dataset}-{pair}""" )
snake_case = datasets.load_dataset(_UpperCamelCase , _UpperCamelCase )
if save_dir is None:
snake_case = f"""{dataset}-{pair}"""
snake_case = Path(_UpperCamelCase )
save_dir.mkdir(exist_ok=_UpperCamelCase )
for split in ds.keys():
print(f"""Splitting {split} with {ds[split].num_rows} records""" )
# to save to val.source, val.target like summary datasets
snake_case = 'val' if split == 'validation' else split
snake_case = save_dir.joinpath(f"""{fn}.source""" )
snake_case = save_dir.joinpath(f"""{fn}.target""" )
snake_case = src_path.open('w+' )
snake_case = tgt_path.open('w+' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
snake_case = x['translation']
src_fp.write(ex[src_lang] + '\n' )
tgt_fp.write(ex[tgt_lang] + '\n' )
print(f"""Saved {dataset} dataset to {save_dir}""" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 150
| 0
|
def is_palindrome(num: int) -> bool:
    """Return True if the decimal digits of `num` read the same forwards and backwards."""
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num
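# A few sanity checks, assuming the behaviour documented above:
if __name__ == "__main__":
    assert is_palindrome(121)
    assert not is_palindrome(-121)  # the sign makes the number non-palindromic
    assert not is_palindrome(10)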
if __name__ == "__main__":
import doctest
doctest.testmod()
| 117
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
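# A usage sketch; the checkpoint name comes from the pretrained maps above:
#
#   tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
#   enc = tokenizer("Hello world", return_tensors="pt")
#   print(enc.input_ids)  # ids for [CLS] hello world [SEP]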
| 117
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = ['''pixel_values''']
    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BICUBIC,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_convert_rgb=True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name='crop_size')

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
        output_size = get_resize_output_image_size(image, size=size['shortest_edge'], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""")
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        do_convert_rgb=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name='size', default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size', default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )

        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')

        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')

        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
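# --- Editor's usage sketch (hedged, not part of the original file) ----------
# A minimal illustration of the preprocessing pipeline above. It assumes the
# class is importable as `CLIPImageProcessor`; the dummy input is illustrative.
#
#   import numpy as np
#   processor = CLIPImageProcessor()
#   dummy = np.zeros((480, 640, 3), dtype=np.uint8)   # HWC uint8 image
#   batch = processor.preprocess(images=dummy, return_tensors='np')
#   print(batch['pixel_values'].shape)                # (1, 3, 224, 224)
# -----------------------------------------------------------------------------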
| 160
|
"""simple docstring"""
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width <= 0 or dst_height <= 0:
            raise ValueError('Destination width/height should be > 0')

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                # Copy the nearest source pixel into the output image.
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread('image_data/lena.jpg', 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()
imshow(
F'Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}', n.output
)
waitKey(0)
destroyAllWindows()
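# Editor's note (hedged): the mapping above is plain truncation. Shrinking a
# 4x4 image to 2x2 gives ratio 4 / 2 = 2, so get_x(0) == 0 and get_x(1) == 2,
# i.e. every second source row/column is sampled; upscaling repeats pixels.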
| 160
| 1
|
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10000 == 0:
                logger.info('Writing example %d of %d', ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when tokenizing just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info('*** Example ***')
                logger.info('guid: %s', example.guid)
                logger.info('tokens: %s', ' '.join([str(x) for x in tokens]))
                logger.info('input_ids: %s', ' '.join([str(x) for x in input_ids]))
                logger.info('input_mask: %s', ' '.join([str(x) for x in input_mask]))
                logger.info('segment_ids: %s', ' '.join([str(x) for x in segment_ids]))
                logger.info('label_ids: %s', ' '.join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids ) )
        return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir, 'cached_{}_{}_{}'.format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)), )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + '.lock'
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f'Loading features from cached file {cached_features_file}')
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f'Creating features from dataset file at {data_dir}')
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ['xlnet']), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ['xlnet'] else 0, sep_token=tokenizer.sep_token, sep_token_extra=bool(model_type in ['roberta']), pad_on_left=bool(tokenizer.padding_side == 'left'), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, )
                    logger.info(f'Saving features into cached file {cached_features_file}')
                    torch.save(self.features, cached_features_file)
        def __len__(self) -> int:
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.
        pad_token_label_id: int = -100

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ['xlnet']), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ['xlnet'] else 0, sep_token=tokenizer.sep_token, sep_token_extra=bool(model_type in ['roberta']), pad_on_left=bool(tokenizer.padding_side == 'left'), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
__UpperCamelCase =tf.data.Dataset.from_generator(
A_ , ({'input_ids': tf.intaa, 'attention_mask': tf.intaa}, tf.intaa) , (
{'input_ids': tf.TensorShape([None] ), 'attention_mask': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
__UpperCamelCase =tf.data.Dataset.from_generator(
A_ , ({'input_ids': tf.intaa, 'attention_mask': tf.intaa, 'token_type_ids': tf.intaa}, tf.intaa) , (
{
'input_ids': tf.TensorShape([None] ),
'attention_mask': tf.TensorShape([None] ),
'token_type_ids': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
        def get_dataset(self) -> tf.data.Dataset:
__UpperCamelCase =self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
        def __len__(self) -> int:
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
return self.features[i]
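# --- Editor's sketch (hedged): one concrete task ------------------------------
# One possible way to plug into the abstractions above for CoNLL-style files,
# where each non-blank line is "<word> <label>" and sentences are separated by
# blank lines. The file layout, label set, and class name `NER` are
# illustrative assumptions, not part of the original module.
#
#   class NER(TokenClassificationTask):
#       @staticmethod
#       def read_examples_from_file(data_dir, mode):
#           examples, words, labels = [], [], []
#           with open(os.path.join(data_dir, f"{mode.value}.txt"), encoding="utf-8") as f:
#               for line in f:
#                   if not line.strip():
#                       if words:
#                           examples.append(InputExample(guid=str(len(examples)), words=words, labels=labels))
#                           words, labels = [], []
#                   else:
#                       word, label = line.split()
#                       words.append(word)
#                       labels.append(label)
#           if words:
#               examples.append(InputExample(guid=str(len(examples)), words=words, labels=labels))
#           return examples
#
#       @staticmethod
#       def get_labels(path):
#           return ["O", "B-PER", "I-PER"]  # toy label set for illustration
# ------------------------------------------------------------------------------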
| 117
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric to output_dir."""
    if metric == "rouge2":
        exp = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        exp = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        exp = '{val_avg_em:.4f}-{step_count}'
    elif metric == "loss":
        exp = '{val_avg_loss:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            f'seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can make your own by adding'
            ' to this function.' )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f'val_{metric}', mode='max', save_top_k=1, every_n_epochs=1, )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f'val_{metric}', mode='min' if 'loss' in metric else 'max', patience=patience, verbose=True, )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f'lr_group_{i}': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****')
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']})
# Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / 'test_results.txt'
            generations_file = od / 'test_generations.txt'
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f'{type_path}_results/{trainer.global_step:05d}.txt'
            generations_file = od / f'{type_path}_generations/{trainer.global_step:05d}.txt'
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, 'a+') as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f'{key}: {val:.6f}\n'
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = '\n'.join(metrics['preds'])
            generations_file.open('w+').write(content)
@rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, 'test')

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 117
| 1
|
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result() -> None:
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node_a, node_b, cost in edges:
        adjacency[node_a].append([node_b, cost])
        adjacency[node_b].append([node_a, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
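# Editor's note: the graph is undirected, so each expected edge is accepted in
# either orientation; e.g. the expected edge (7, 6) also matches a result
# entry stored as (6, 7).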
| 65
|
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree), )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
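# Worked example (editor's addition): a degree-1 curve is the straight segment
# between its two control points, so t = 0.5 returns the midpoint:
#   >>> BezierCurve([(1, 2), (3, 5)]).bezier_curve_function(0.5)
#   (2.0, 3.5)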
| 65
| 1
|
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of value, or its derivative if deriv is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after the forward propagation training."""
    # Random weight: an odd integer in [1, 199], kept from the original.
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input('Expected value: '))
    number_propagations = int(input('Number of propagations: '))
    print(forward_propagation(expected, number_propagations))
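# Worked step (editor's addition): with current weight w, one iteration does
#   layer_1 = sigmoid(0.02 * w)
#   delta   = ((expected / 100) - layer_1) * layer_1 * (1 - layer_1)
#   w       = w + 0.02 * delta
# which nudges the final output layer_1 * 100 toward `expected`.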
| 364
|
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(a__ ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(a__ ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(a__ ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
        next_generation.append(next_generation_row)
return next_generation
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new('RGB', (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
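# Quick check (editor's addition): the blinker oscillates with period 2 under
# new_generation:
#   >>> new_generation(BLINKER)
#   [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
#   >>> new_generation(new_generation(BLINKER))
#   [[0, 1, 0], [0, 1, 0], [0, 1, 0]]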
| 185
| 0
|
"""simple docstring"""
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("""No LU decomposition exists""")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
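# Worked example (editor's addition): Doolittle factorisation of a 3x3 matrix,
# verified by multiplying the factors back together.
#   >>> matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
#   >>> lower, upper = lower_upper_decomposition(matrix)
#   >>> bool(np.allclose(lower @ upper, matrix))
#   True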
| 100
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_table_transformer""": [
"""TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TableTransformerConfig""",
"""TableTransformerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
"""TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TableTransformerForObjectDetection""",
"""TableTransformerModel""",
"""TableTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 81
| 0
|
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif''']
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset, and yield each prompt n_copies times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors='pt')
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of the code containing EOF_STRINGS."""
    string_list = re.split('(%s)' % '|'.join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple code completions for each task, distributing the work across processes."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch['ids'][:, : batch['input_len']], num_return_sequences=batch_size, **gen_kwargs )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset('openai_humaneval')
    code_eval_metric = load_metric('code_eval')

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval['test'])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval['test'], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[''], predictions=[['']])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            ' flag to enable code evaluation.' )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    code_gens = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs, )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append('\n' + test_func + '\n' + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers )
        print(f"""Results: {pass_at_k}""")

        # Save results to json file
        with open(args.output_file, 'w') as fp:
            json.dump(pass_at_k, fp)
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 371
|
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
        cluster = rh.cluster(
name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
)
else:
        cluster = rh.cluster(
name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
    example_dir = args.example.rsplit('''/''', 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F"""pip install -r transformers/examples/{example_dir}/requirements.txt"""])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F"""python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}"""])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 103
| 0
|
from math import factorial
def solution(n: int = 20) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
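# Worked example (editor's addition): a 1x1 grid has C(2, 1) = 2 lattice
# routes, and solution(20) returns 137846528820 (the Project Euler 15 answer).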
| 117
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('''lowercase''', do_lower_case) != do_lower_case
            or normalizer_state.get('''strip_accents''', strip_accents) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('''type'''))
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
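# Editor's note: the special-token layout produced above is
#   single sequence: [CLS] A... [SEP]            -> token_type_ids all 0
#   sequence pair:   [CLS] A... [SEP] B... [SEP] -> 0s through the first [SEP],
#                                                   1s for the second segment.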
| 117
| 1
|
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '''\
'''
_DESCRIPTION = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
_KWARGS_DESCRIPTION = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
            if device == "gpu":
                device = 'cuda'
        else:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts, add_special_tokens=False, padding=True, truncation=True, max_length=max_tokenized_len, return_tensors='pt', return_attention_mask=True, ).to(device)

        encoded_texts = encodings['input_ids']
        attn_masks = encodings['attention_mask']

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction='none')

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1)

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # CrossEntropyLoss returns natural-log losses, so exponentiate with e.
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1) )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 15
|
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    XL_PREFIX = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(self, return_full_text=None, return_tensors=None, return_text=None, return_type=None, clean_up_tokenization_spaces=None, prefix=None, handle_long_generation=None, stop_sequence=None, **generate_kwargs, ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params['''prefix'''] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework )
            generate_kwargs['''prefix_length'''] = prefix_inputs['''input_ids'''].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
                    ''' [None, \'hole\']''' )
            preprocess_params['''handle_long_generation'''] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''')
            if return_tensors is not None:
                raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''')
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''')
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params['''return_type'''] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params['''clean_up_tokenization_spaces'''] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    '''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
                    ''' the stop sequence will be used as the stop sequence string in the interim.''' )
            generate_kwargs['''eos_token_id'''] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({'''add_space_before_punct_symbol''': True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
def __lowercase ( self , _a , _a="" , _a=None , **_a ) -> List[Any]:
_a : Optional[int] = self.tokenizer(
prefix + prompt_text , padding=_a , add_special_tokens=_a , return_tensors=self.framework )
_a : Union[str, Any] = prompt_text
if handle_long_generation == "hole":
_a : List[str] = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
_a : int = generate_kwargs['''max_new_tokens''']
else:
_a : List[Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_a : List[str] = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'''We cannot use `hole` to handle this generation the number of desired tokens exceeds the'''
''' models max length''' )
_a : List[Any] = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
_a : List[str] = inputs['''attention_mask'''][:, -keep_length:]
return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs['''input_ids''']
        attention_mask = model_inputs.get('''attention_mask''', None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop('''prompt_text''')

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop('''prefix_length''', 0)
        if prefix_length > 0:
            has_max_new_tokens = '''max_new_tokens''' in generate_kwargs or (
                '''generation_config''' in generate_kwargs
                and generate_kwargs['''generation_config'''].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs['''max_length'''] = generate_kwargs.get('''max_length''') or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = '''min_new_tokens''' in generate_kwargs or (
                '''generation_config''' in generate_kwargs
                and generate_kwargs['''generation_config'''].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {"generated_text": all_text}
            records.append(record)

        return records
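# A minimal usage sketch of how the parameters sanitized above flow through the pipeline
# (assumption: a causal LM checkpoint such as "gpt2" is available; it is not part of this module).
# `prefix` is tokenized once so its length can later be added to `max_length`, and
# `stop_sequence` is reduced to a single `eos_token_id`.
#
#   from transformers import pipeline
#
#   generator = pipeline("text-generation", model="gpt2")
#   out = generator(
#       "Hello, I'm a language model,",
#       max_new_tokens=20,
#       return_full_text=False,          # -> ReturnType.NEW_TEXT
#       handle_long_generation="hole",   # left-truncate the prompt if it would overflow
#   )
#   print(out[0]["generated_text"])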
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--mobilebert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained MobileBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
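# A quick post-hoc sanity check (sketch; assumes the script above has just run): re-create the
# model from the same config and reload the saved state dict — load_state_dict raises if any
# parameter name or shape disagrees with what torch.save wrote above.
#
#   config = MobileBertConfig.from_json_file(args.mobilebert_config_file)
#   model = MobileBertForPreTraining(config)
#   model.load_state_dict(torch.load(args.pytorch_dump_path))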
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Solve the affine transform mapping pt1 to pt2 and apply it to the image."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
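# What cv2.getAffineTransform computes, sketched with plain numpy: the 2x3 matrix M such that
# M @ [x, y, 1]^T maps each source point to its destination. Three non-collinear point pairs
# give six equations for the six unknowns, solvable as one 3x3 linear system per output row.
#
#   src = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
#   dst = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
#   A = np.hstack([src, np.ones((3, 1), np.float32)])  # rows are [x, y, 1]
#   M = np.linalg.solve(A, dst).T                      # 2x3, same result as cv2.getAffineTransform(src, dst)
#   assert np.allclose(M @ np.array([50, 50, 1]), [10, 100])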
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
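# Arithmetic behind the tester's `seq_length` above, spelled out: with the default
# image_size=30 and patch_size=2 there are (30 // 2) ** 2 = 225 patches, plus the [CLS] and
# distillation tokens, so the expected sequence length is 227.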
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
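# After conversion, the dump path is a regular diffusers pipeline folder. A minimal reload
# sketch (path and prompt are hypothetical; assumes a Stable Diffusion v1.x checkpoint):
#
#   from diffusers import StableDiffusionPipeline
#
#   pipe = StableDiffusionPipeline.from_pretrained("./converted-sd")
#   image = pipe("an astronaut riding a horse").images[0]
#   image.save("out.png")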
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
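# A sketch of how these helpers are typically wired together (metric name and output dir are
# hypothetical; assumes a LightningModule `model` and its dataloaders are defined elsewhere):
#
#   trainer = pl.Trainer(
#       max_epochs=3,
#       callbacks=[
#           get_checkpoint_callback(output_dir="outputs", metric="rouge2"),
#           get_early_stopping_callback(metric="rouge2", patience=3),
#           Seq2SeqLoggingCallback(),
#       ],
#   )
#   trainer.fit(model)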
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
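# A minimal sketch of the lazy-import pattern _LazyModule implements: nothing heavy is imported
# until an attribute is first accessed, via a module-level __getattr__ (PEP 562). The module and
# attribute names below are illustrative, not part of this file:
#
#   import importlib
#
#   _import_structure = {"configuration_beit": ["BeitConfig"]}
#
#   def __getattr__(name):
#       for module_name, attrs in _import_structure.items():
#           if name in attrs:
#               module = importlib.import_module(f".{module_name}", __name__)
#               return getattr(module, name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")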
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """Return a peak element of the list, using divide and conquer in O(log n) time."""
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
if __name__ == "__main__":
import doctest
doctest.testmod()
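# Worked examples (hypothetical inputs): the list rises then falls, so the middle-out search
# discards half the list per step and finds a peak in O(log n) comparisons.
#
#   >>> peak([1, 2, 3, 4, 5, 4, 3, 2, 1])
#   5
#   >>> peak([1, 10, 9, 8, 7, 6])
#   10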
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring"""
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


# for checking whether the graph has an euler path or circuit
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
main()
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}


class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the input values: 1 if at least one input is 1, else 0."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    """Create a vector pointing from end_point1 to end_point2."""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    """Three points are collinear exactly when AB x AC is the zero vector."""
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
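# A quick self-check of the helpers above: three points on one line have a zero cross
# product AB x AC, while three points spanning a plane do not.
if __name__ == "__main__":
    assert are_collinear((0.0, 0.0, 0.0), (1.0, 1.0, 1.0), (2.0, 2.0, 2.0))
    assert not are_collinear((0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0))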
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '\\n\n'
_DESCRIPTION = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    model_id (str): model used for calculating Perplexity\n        NOTE: Perplexity can only be calculated for causal language models.\n        This includes models such as gpt2, causal variations of bert,\n        causal versions of t5, and more (the full list can be found\n        in the AutoModelForCausalLM documentation here:\n        https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n    input_texts (list of str): input text, each separate text snippet\n        is one list entry.\n    batch_size (int): the batch size to run texts through the model. Defaults to 16.\n    add_start_token (bool): whether to add the start token to the texts,\n        so the perplexity can include the probability of the first word. Defaults to True.\n    device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n    perplexity: dictionary containing the perplexity scores for the texts\n        in the input list, as well as the mean perplexity. If one of the input texts is\n        longer than the max input length of the model, then it is truncated to the\n        max length for the perplexity computation.\nExamples:\n    Example 1:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              add_start_token=False,\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        78.22\n        >>> print(round(results["perplexities"][0], 2))\n        11.11\n\n    Example 2:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = datasets.load_dataset("wikitext",\n        ...                                     "wikitext-2-raw-v1",\n        ...                                     split="test")["text"][:50] # doctest:+ELLIPSIS\n        [...]\n        >>> input_texts = [s for s in input_texts if s!=\'\']\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        60.35\n        >>> print(round(results["perplexities"][0], 2))\n        81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
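# A tiny worked example of the definition used above — perplexity as the exponentiated
# average negative log-likelihood. With a uniform next-token distribution over V tokens the
# perplexity is exactly V. (Note: CrossEntropyLoss is natural-log, so the identity below uses
# torch.exp; the metric body above keeps the original source's torch.exp2.)
if __name__ == "__main__":
    V = 4
    logits = torch.zeros(1, 1, V)  # uniform distribution over V tokens
    nll = CrossEntropyLoss()(logits.transpose(1, 2), torch.tensor([[2]]))
    print(torch.exp(nll).item())  # 4.0 == V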
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract time info from a single job in a GitHub Actions workflow run"""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
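# A minimal usage sketch (assumption: the default object-detection checkpoint is downloaded
# from the Hub; any local image path or URL works):
#
#   from transformers import pipeline
#
#   detector = pipeline("object-detection")
#   for pred in detector("my_photo.jpg", threshold=0.9):
#       print(pred["label"], pred["score"], pred["box"])  # box = {"xmin", "ymin", "xmax", "ymax"}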
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`list` of `int`): Predicted class labels, as returned by a model.\n    references (`list` of `int`): Ground truth labels.\n    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but that also returns the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        [\'p-value\', \'pearsonr\']\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n        >>> print(round(results[\'p-value\'], 2))\n        0.15\n'
_CITATION = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n         Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n         Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n         Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n         Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n         Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n         Kern, Robert and Larson, Eric and Carey, C J and\n         Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n         {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n         Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n         Harris, Charles R. and Archibald, Anne M. and\n         Ribeiro, Antonio H. and Pedregosa, Fabian and\n         {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n         Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
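# A plain-numpy cross-check of what scipy's pearsonr returns for the docstring example:
# r = cov(x, y) / (std(x) * std(y)), here written with explicit mean-centered sums.
if __name__ == "__main__":
    import numpy as np

    x = np.array([10, 9, 2.5, 6, 4])
    y = np.array([1, 2, 3, 4, 5])
    r = ((x - x.mean()) * (y - y.mean())).sum() / np.sqrt(((x - x.mean()) ** 2).sum() * ((y - y.mean()) ** 2).sum())
    print(round(float(r), 2))  # -0.74, matching the metric above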
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case , snake_case=1_3 , snake_case=3_0 , snake_case=2 , snake_case=3 , snake_case=True , snake_case=True , snake_case=3_2 , snake_case=2 , snake_case=4 , snake_case=3_7 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=1_0 , snake_case=0.02 , snake_case=3 , snake_case=None , snake_case=2 , ):
'''simple docstring'''
UpperCAmelCase : Any = parent
UpperCAmelCase : str = batch_size
UpperCAmelCase : Union[str, Any] = image_size
UpperCAmelCase : List[str] = patch_size
UpperCAmelCase : Optional[Any] = num_channels
UpperCAmelCase : Any = is_training
UpperCAmelCase : str = use_labels
UpperCAmelCase : str = hidden_size
UpperCAmelCase : List[str] = num_hidden_layers
UpperCAmelCase : Any = num_attention_heads
UpperCAmelCase : Dict = intermediate_size
UpperCAmelCase : List[Any] = hidden_act
UpperCAmelCase : List[Any] = hidden_dropout_prob
UpperCAmelCase : str = attention_probs_dropout_prob
UpperCAmelCase : List[str] = type_sequence_label_size
UpperCAmelCase : Any = initializer_range
UpperCAmelCase : str = scope
UpperCAmelCase : Optional[int] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
UpperCAmelCase : Optional[int] = (image_size // patch_size) ** 2
UpperCAmelCase : Any = num_patches + 2
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : List[Any] = None
if self.use_labels:
UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def A_ ( self ):
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = TFDeiTModel(config=snake_case )
UpperCAmelCase : Any = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : List[Any] = TFDeiTForMaskedImageModeling(config=snake_case )
UpperCAmelCase : int = model(snake_case )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase : List[str] = 1
UpperCAmelCase : Optional[int] = TFDeiTForMaskedImageModeling(snake_case )
UpperCAmelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase : List[Any] = model(snake_case )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Tuple = self.type_sequence_label_size
UpperCAmelCase : Optional[Any] = TFDeiTForImageClassification(snake_case )
UpperCAmelCase : Optional[int] = model(snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase : int = 1
UpperCAmelCase : Dict = TFDeiTForImageClassification(snake_case )
UpperCAmelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase : Dict = model(snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = config_and_inputs
UpperCAmelCase : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class UpperCamelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
{
"feature-extraction": TFDeiTModel,
"image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Tuple = False
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
SCREAMING_SNAKE_CASE__ : List[str] = False
SCREAMING_SNAKE_CASE__ : int = False
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[str] = TFDeiTModelTester(self )
UpperCAmelCase : Optional[int] = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=3_7 )
def A_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def A_ ( self ):
'''simple docstring'''
pass
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : int = model_class(snake_case )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCAmelCase : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case , tf.keras.layers.Dense ) )
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]
        return inputs_dict
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor(self):
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 311
|
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params(module):
    """Freeze a module's parameters so they are excluded from gradient updates."""
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    """Pick the best available torch device, warning about known MPS issues."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations.")
    return device


def show_image(image):
    """Display an image with both axes hidden."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    """Return the current time formatted as HH:MM:SS."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
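# A minimal usage sketch for the helpers above (the helper names are the
# reconstructed ones defined in this file; `model` stands for any torch.nn.Module):
#
#     device = get_device()
#     print(f"[{get_timestamp()}] using device: {device}")
#     freeze_params(model)  # the module's weights no longer receive gradients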
| 311
| 1
|
'''simple docstring'''
def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
| 160
|
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowercase : Any = logging.get_logger(__name__)
class Conversation:
    def __init__(self, text: str = None, conversation_id=None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".')
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input')
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
@add_end_docstrings(PIPELINE_INIT_ARGS, r"""
_lowerCamelCase , r"""
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
""" , )
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation, min_length_for_response=32):
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method")
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces)
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
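# A hedged usage sketch for the classes above, via the high-level pipeline API
# (the checkpoint name is just an example of a conversational model):
#
#     from transformers import pipeline
#
#     chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
#     conversation = Conversation("What is the capital of France?")
#     conversation = chatbot(conversation)
#     print(conversation.generated_responses[-1])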
| 160
| 1
|
def binary_insertion_sort(collection: list) -> list:
    """Sort a list in place: insertion sort with binary search for the insert position."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
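# Quick check (sketch): the binary search only reduces comparisons to O(log i)
# per element; the shifting loop keeps the overall worst case at O(n^2):
#
#     assert binary_insertion_sort([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]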
| 94
|
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set:
    """Iterative depth-first search; returns the set of explored vertices."""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
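# For contrast with the "Differences from BFS" notes above, a minimal BFS over
# the same graph only swaps the stack for a FIFO queue (sketch, not part of the
# original module):
#
#     from collections import deque
#
#     def breadth_first_search(graph: dict, start: str) -> set:
#         explored, queue = {start}, deque([start])
#         while queue:
#             v = queue.popleft()
#             for adj in graph[v]:
#                 if adj not in explored:
#                     explored.add(adj)
#                     queue.append(adj)
#         return explored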
| 94
| 1
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCamelCase_ : Optional[Any] = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 354
|
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text
    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)
def lowerCAmelCase__ ( self : Dict ):
pass
def lowerCAmelCase__ ( self : Tuple ):
pass
def lowerCAmelCase__ ( self : Union[str, Any] ):
pass
| 223
| 0
|
'''simple docstring'''
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient phi(n) for 2 <= n <= limit, via a prime sieve."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
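# Sanity check (sketch): for limit=8, phi(2)+...+phi(8) = 1+2+2+4+2+6+4 = 21,
# the Project Euler 72 sample answer for d <= 8 -- solution(8) reproduces this,
# up to the float truncation inside int(sum(...)):
#
#     assert solution(8) == 21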
| 67
|
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = '''
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
'''
class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
| 33
| 0
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class DisjunctiveConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
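# A hedged sketch of how a DisjunctiveConstraint is consumed in practice:
# `generate` accepts a list of Constraint objects for constrained beam search
# (the model/tokenizer names below are placeholders, not part of this test):
#
#     from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
#     from transformers.generation import DisjunctiveConstraint
#
#     tokenizer = AutoTokenizer.from_pretrained("t5-small")
#     model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
#     phrases = tokenizer(["rained", "snowed"], add_special_tokens=False).input_ids
#     constraint = DisjunctiveConstraint(phrases)  # force exactly one of the phrases
#     inputs = tokenizer("the weather yesterday", return_tensors="pt")
#     out = model.generate(**inputs, constraints=[constraint], num_beams=4)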
| 363
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a_ ( a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = StableDiffusionSAGPipeline
__SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS
__SCREAMING_SNAKE_CASE : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE : int = False
def __lowerCAmelCase ( self ) ->Optional[int]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
SCREAMING_SNAKE_CASE : int = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=_lowerCamelCase , set_alpha_to_one=_lowerCamelCase , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], width=768, height=512, generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np", )
        image = output.images
        assert image.shape == (1, 512, 768, 3)
| 19
| 0
|
"""simple docstring"""
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return
    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
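# Note: each element costs up to two nested recursive calls here, so CPython's
# default recursion limit (~1000 frames) is reached quickly. The usual
# workaround for larger inputs (sketch):
#
#     import sys
#     sys.setrecursionlimit(10_000)  # before sorting lists with thousands of items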
| 100
|
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    def __init__(self, params, data):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])
        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        max_len = self.params.max_model_input_size
        idxs = self.lengths > max_len
        logger.info(f"""Splitting {sum(idxs)} too long sequences.""")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
        else:
            cls_id, sep_id = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)
                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""")

    def remove_unknown_sequences(self):
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids['unk_token']
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""")

    def print_statistics(self):
        if not self.params.is_master:
            return
        logger.info(f"""{len(self)} sequences""")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)
        # Max for paddings
        max_seq_len_ = max(lengths)
        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids['pad_token']
        else:
            pad_idx = self.params.special_tok_ids['unk_token']
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)
        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
| 327
| 0
|
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _snake_case ( _snake_case , _snake_case , _snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = StableUnCLIPPipeline
SCREAMING_SNAKE_CASE__ = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE__ = TEXT_TO_IMAGE_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ = TEXT_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE__ = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
SCREAMING_SNAKE_CASE__ = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# prior components
torch.manual_seed(0 )
a :Any = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
a :Dict = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowerCamelCase , projection_dim=_lowerCamelCase , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
a :Optional[int] = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_lowerCamelCase , num_layers=1 , )
torch.manual_seed(0 )
a :str = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=_lowerCamelCase , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
a :List[Any] = StableUnCLIPImageNormalizer(embedding_dim=_lowerCamelCase )
a :List[str] = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
a :str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
a :Any = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
a :List[str] = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowerCamelCase , layers_per_block=1 , upcast_attention=_lowerCamelCase , use_linear_projection=_lowerCamelCase , )
torch.manual_seed(0 )
a :Any = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.0_0085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_lowerCamelCase , steps_offset=1 , )
torch.manual_seed(0 )
a :List[Any] = AutoencoderKL()
a :Dict = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy")
        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            "anime turtle", prior_num_inference_steps=2, num_inference_steps=2, output_type="np", )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 281
|
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares of 1..n."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 281
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_albert''': ['''ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''AlbertConfig''', '''AlbertOnnxConfig'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
'''ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AlbertForMaskedLM''',
'''AlbertForMultipleChoice''',
'''AlbertForPreTraining''',
'''AlbertForQuestionAnswering''',
'''AlbertForSequenceClassification''',
'''AlbertForTokenClassification''',
'''AlbertModel''',
'''AlbertPreTrainedModel''',
'''load_tf_weights_in_albert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
'''TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAlbertForMaskedLM''',
'''TFAlbertForMultipleChoice''',
'''TFAlbertForPreTraining''',
'''TFAlbertForQuestionAnswering''',
'''TFAlbertForSequenceClassification''',
'''TFAlbertForTokenClassification''',
'''TFAlbertMainLayer''',
'''TFAlbertModel''',
'''TFAlbertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
'''FlaxAlbertForMaskedLM''',
'''FlaxAlbertForMultipleChoice''',
'''FlaxAlbertForPreTraining''',
'''FlaxAlbertForQuestionAnswering''',
'''FlaxAlbertForSequenceClassification''',
'''FlaxAlbertForTokenClassification''',
'''FlaxAlbertModel''',
'''FlaxAlbertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 160
|
"""simple docstring"""
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n by trial division."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    prime = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            prime = i
            n //= i
        i += 1
    if n > 1:
        prime = n
    return int(prime)


if __name__ == "__main__":
    print(f'{solution() = }')
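# Sanity check: Project Euler 3's worked example -- the prime factors of 13195
# are 5, 7, 13 and 29, so the largest is 29:
#
#     assert solution(13195) == 29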
| 160
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)
    def get_config(self):
return BlipTextConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,projection_dim=self.projection_dim ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,dropout=self.dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,bos_token_id=self.bos_token_id ,)
    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Tuple =(TFBlipTextModel,) if is_tf_available() else ()
__A : List[Any] =False
__A : List[Any] =False
__A : Any =False
    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass
@unittest.skip(reason="Blip does not use inputs_embeds" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def UpperCamelCase__ ( self ):
pass
@slow
def UpperCamelCase__ ( self ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : int = TFBlipTextModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def UpperCamelCase__ ( self ,_snake_case=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=_snake_case )
| 67
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
"""uw-madison/mra-base-512-4""": """https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json""",
}
class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-5, position_embedding_type="absolute", block_per_row=4, approx_mode="full", initial_prior_first_n_blocks=0, initial_prior_diagonal_n_blocks=0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
| 67
| 1
|
import math
def sieve(n: int) -> list[int]:
    """Segmented sieve of Eratosthenes: return all primes up to n."""
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    # sieve the first segment [2, sqrt(n)] directly
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    # sieve the remaining range in segments of size sqrt(n)
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime


print(sieve(10**6))
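# Sanity check (sketch): the segmented sieve must agree with the primes below 30:
#
#     assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]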
| 252
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
lowerCAmelCase : Tuple =logging.get_logger(__name__)
lowerCAmelCase : List[str] ={'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
lowerCAmelCase : Optional[int] ={
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
lowerCAmelCase : List[Any] ={
'''RUCAIBox/mvp''': 1_024,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                "to use it with pretokenized inputs.")
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                "to use it with pretokenized inputs.")
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 223
| 0
|
"""Topological sort of a directed acyclic graph via depth-first search."""

edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Perform topological sort on a directed acyclic graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
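# Minimal check (relies on the module-level `edges` above): the function returns
# a post-order, so every edge's destination must precede its source in the list.
def _is_valid_postorder(order):
    position = {vertex: index for index, vertex in enumerate(order)}
    return all(position[dst] < position[src] for src in edges for dst in edges[src])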
| 142
|
"""Processor class for BridgeTower."""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
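# Usage sketch (checkpoint name for illustration only): a single processor call
# returns the text tensors (input_ids, attention_mask) merged with the image
# tensors (pixel_values, pixel_mask).
# processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
# inputs = processor(images=image, text="a photo", return_tensors="pt")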
| 142
| 1
|
""" GPTNeoX model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(self, vocab_size=50_432, hidden_size=6_144, num_hidden_layers=44, num_attention_heads=64, intermediate_size=24_576, hidden_act="gelu", rotary_pct=0.25, rotary_emb_base=10_000, attention_dropout=0.0, hidden_dropout=0.0, classifier_dropout=0.1, max_position_embeddings=2_048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=0, eos_token_id=2, tie_word_embeddings=False, use_parallel_residual=True, rope_scaling=None, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                f"`rope_scaling` must be a dictionary with two fields, `type` and `factor`, got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
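# Usage sketch (hypothetical values): the validator above accepts exactly a
# two-key dict with a known type and a float factor greater than 1, so
#   GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})   # passes
#   GPTNeoXConfig(rope_scaling={"type": "exp", "factor": 2.0})      # raises ValueError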
| 35
|
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager
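# Typical use of the public API re-exported above (a sketch; downloads data):
# from datasets import load_dataset
# ds = load_dataset("glue", "mrpc", split="train")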
| 19
| 0
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False


class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3)
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        model = Transformer2DModel(**model_kwargs)
        return model
    def test_vq_diffusion(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(vqvae=vqvae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe([prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length)

        pipe = VQDiffusionPipeline(vqvae=vqvae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe([prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline("teddy bear playing in the pool", num_images_per_prompt=1, generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
| 371
|
'''simple docstring'''
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    """Return True if `n` reads the same forwards and backwards."""
    n = str(n)
    return n == n[::-1]


def solution(limit: int = 1_000_000) -> int:
    """Sum all numbers below `limit` that are palindromic in base 10 and base 2."""
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
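# Worked example (hand-checked): below 20 the doubly palindromic numbers are
# 1, 3, 5, 7 and 9 (binary 1, 11, 101, 111 and 1001), so solution(20) == 25.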
| 187
| 0
|
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny", "roberta", checkpoint_path, precision="fp32", device="cuda:0" if torch.cuda.is_available() else "cpu", enable_fusion=enable_fusion, fusion_type="aff_2d" if enable_fusion else None
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" in key and "qkv" in key:
            # split qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict
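# Shape sketch for the qkv split above (assumed fused layout): a tensor whose
# first dimension is 3 * dim is cut into equal thirds, e.g. torch.arange(6)
# with qkv_dim=2 splits into [0, 1] (query), [2, 3] (key) and [4, 5] (value).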
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 281
|
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, -len(tensor[:sequence_length]) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, -len(tensor[:sequence_length]) :] = tensor[:sequence_length]

    return out_tensor.tolist()
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
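# Quick examples for is_punctuation above: "!" (cp 33) and "[" (cp 91) hit the
# ASCII ranges, "、" (ideographic comma, Unicode category Po) hits the category
# branch, and "a" returns False.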
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt" if labels is None else None)

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
| 281
| 1
|
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 321
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ]
    )

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )
    return rename_keys
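# Sanity sketch (assuming a default 12-layer BeitConfig): the loop above emits
# 10 (old, new) pairs per layer, plus the embedding entries and either the LM
# head or the classification head entries, all consumed by rename_key() below.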
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original DiT weights into the BEiT structure."""
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add image processor", use_temp_dir=True
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add model", use_temp_dir=True
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 321
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
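# Usage sketch (hypothetical backbone name; requires timm at model load time):
# config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
# Leaving out_indices unset keeps only the final feature map via the (-1,) default.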
| 67
|
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
@require_tf
@require_keras_nlp
class GPTTokenizationTest(unittest.TestCase):
    # The TF tokenizers are usually going to be used as pretrained tokenizers from
    # existing model checkpoints, so that's what we focus on here.

    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
| 1
|
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
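# Note (site-dependent): the XPath targets the three "maincounter-number" spans
# on worldometers; covid_stats() fails at tuple construction if the page layout
# changes or yields a different number of counters.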
| 201
|
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
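# Invocation sketch (flag names come from TensorFlowBenchmarkArguments and may
# differ across versions):
#   python run_benchmark_tf.py --models gpt2 --batch_sizes 8 --sequence_lengths 128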
| 201
| 1
|