| code (string, lengths 86–54.5k) | code_codestyle (int64, 0–371) | style_context (string, lengths 87–49.2k) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
from __future__ import annotations
from __future__ import annotations
class IIRFilter:
    def __init__(self, order: int) -> None:
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order
    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            error_message = (
                f"""Expected a_coeffs to have {self.order + 1} elements """
                f"""for {self.order}-order filter, got {len(a_coeffs)}"""
            )
            raise ValueError(error_message)
        if len(b_coeffs) != self.order + 1:
            error_message = (
                f"""Expected b_coeffs to have {self.order + 1} elements """
                f"""for {self.order}-order filter, got {len(b_coeffs)}"""
            )
            raise ValueError(error_message)
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs
    def process(self, sample: float) -> float:
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
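A minimal smoke test, assuming the cleaned-up names IIRFilter, set_coefficients, and process used above: with a = b = [1, 0, 0] the difference equation collapses to y[n] = x[n], so every sample should come back unchanged.
# hypothetical identity-filter check; coefficients chosen so y[n] = x[n]
filt = IIRFilter(2)
filt.set_coefficients(a_coeffs=[1.0, 0.0, 0.0], b_coeffs=[1.0, 0.0, 0.0])
print([filt.process(s) for s in [0.0, 0.5, 1.0, 0.5]])  # expect [0.0, 0.5, 1.0, 0.5]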
| 38
|
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """simple docstring"""
    if isinstance(number_of_qubits, str):
        raise TypeError("""number of qubits must be an integer.""")
    if number_of_qubits <= 0:
        raise ValueError("""number of qubits must be > 0.""")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("""number of qubits must be an exact integer.""")
    if number_of_qubits > 10:
        raise ValueError("""number of qubits too large to simulate (>10).""")
    quantum_register = QuantumRegister(number_of_qubits, """qr""")
    classical_register = ClassicalRegister(number_of_qubits, """cr""")
    quantum_circuit = QuantumCircuit(quantum_register, classical_register)
    counter = number_of_qubits
    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)
    # measure all the qubits
    quantum_circuit.measure(quantum_register, classical_register)
    # simulate with 10000 shots
    backend = Aer.get_backend("""qasm_simulator""")
    job = execute(quantum_circuit, backend, shots=10_000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
F'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
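A hedged sanity check, assuming a working qiskit install: for the default |000> input the QFT produces a uniform superposition, so the 10,000 shots should spread roughly evenly over all 2^3 = 8 bitstrings.
counts = quantum_fourier_transform(3)
assert sum(counts.values()) == 10_000
# each of the 8 outcomes should appear ~1250 times, up to sampling noise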
| 38
| 1
|
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        """simple docstring"""
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png')
        self.assertEqual({'predicted_depth': ANY(torch.Tensor), 'depth': ANY(Image.Image)}, outputs)
        import datasets
        dataset = datasets.load_dataset('hf-internal-testing/fixtures_image_utils', 'image', split='test')
        outputs = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
            ], outputs, )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def _a ( self ):
pass
@slow
@require_torch
def _a ( self ):
        model_id = 'Intel/dpt-large'
        depth_estimator = pipeline('depth-estimation', model=model_id)
        outputs = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg')
        outputs['depth'] = hashimage(outputs['depth'])
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 2_9.3_0_4 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.6_6_2 )
@require_torch
def _a ( self ):
        # It is highly irregular to have no small tests.
        self.skipTest('There is no hf-internal-testing tiny model for either GLPN or DPT')
| 353
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
'convert_funnel_original_tf_checkpoint_to_pytorch': [],
'tokenization_funnel': ['FunnelTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ['FunnelTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'FunnelBaseModel',
'FunnelForMaskedLM',
'FunnelForMultipleChoice',
'FunnelForPreTraining',
'FunnelForQuestionAnswering',
'FunnelForSequenceClassification',
'FunnelForTokenClassification',
'FunnelModel',
'FunnelPreTrainedModel',
'load_tf_weights_in_funnel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFFunnelBaseModel',
'TFFunnelForMaskedLM',
'TFFunnelForMultipleChoice',
'TFFunnelForPreTraining',
'TFFunnelForQuestionAnswering',
'TFFunnelForSequenceClassification',
'TFFunnelForTokenClassification',
'TFFunnelModel',
'TFFunnelPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
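The module above only declares `_import_structure`; `_LazyModule` defers the heavy framework imports until an attribute is first accessed. A minimal sketch of that idea (not transformers' actual implementation):
import importlib
import types

class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, item):
        # the real import only happens here, on first attribute access
        module = importlib.import_module("." + self._class_to_module[item], self.__name__)
        return getattr(module, item)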
| 292
| 0
|
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time, burst_time, no_of_processes):
    '''simple docstring'''
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False
# Process until all processes are completed
while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999
        if remaining_time[short] == 0:
            complete += 1
            check = False
            # Find finish time of current process
            finish_time = increment_time + 1
            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]
            if waiting_time[short] < 0:
                waiting_time[short] = 0
# Increment time
increment_time += 1
return waiting_time
def calculate_turnaroundtime(burst_time, no_of_processes, waiting_time):
    '''simple docstring'''
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(waiting_time, turn_around_time, no_of_processes):
    '''simple docstring'''
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)
if __name__ == "__main__":
print("Enter how many process you want to analyze")
lowercase__ : Dict = int(input())
lowercase__ : Any = [0] * no_of_processes
lowercase__ : Optional[int] = [0] * no_of_processes
lowercase__ : int = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print("Enter the arrival time and burst time for process:--" + str(i + 1))
lowercase__ , lowercase__ : Optional[int] = map(int, input().split())
lowercase__ : Optional[Any] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
lowercase__ : Union[str, Any] = burst_time
lowercase__ : List[str] = no_of_processes
lowercase__ : Union[str, Any] = waiting_time
lowercase__ : Optional[Any] = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
    fcfs = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
"Process",
"BurstTime",
"ArrivalTime",
"WaitingTime",
"TurnAroundTime",
],
)
# Printing the dataFrame
pd.set_option("display.max_rows", fcfs.shape[0] + 1)
print(fcfs)
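A non-interactive worked example, assuming the de-obfuscated function names above: three processes arriving at t = 0, 1, 2 with burst times 3, 1, 2 give waiting times [1, 0, 2] and turnaround times [4, 1, 4] under shortest-remaining-time-first.
arrival = [0, 1, 2]
burst = [3, 1, 2]
wait = calculate_waitingtime(arrival, burst, 3)
tat = calculate_turnaroundtime(burst, 3, wait)
print(wait, tat)  # [1, 0, 2] [4, 1, 4]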
| 187
|
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
lowercase__ : str = NewType("DataClass", Any)
lowercase__ : Union[str, Any] = NewType("DataClassType", Any)
def string_to_bool(v):
    '''simple docstring'''
    if isinstance(v, bool):
        return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)." )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    '''simple docstring'''
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(*, aliases=None, help=None, default=dataclasses.MISSING, default_factory=dataclasses.MISSING, metadata=None, **kwargs):
    '''simple docstring'''
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    '''simple docstring'''
    dataclass_types: Iterable[DataClassType]
    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        """simple docstring"""
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
for dtype in self.dataclass_types:
self._add_dataclass_arguments(__lowercase )
@staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
"""simple docstring"""
snake_case_ = f"--{field.name}"
snake_case_ = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , __lowercase ):
raise RuntimeError(
"Unresolved type detected, which should have been done with the help of "
"`typing.get_type_hints` method by default" )
snake_case_ = kwargs.pop("aliases" , [] )
if isinstance(__lowercase , __lowercase ):
snake_case_ = [aliases]
snake_case_ = getattr(field.type , "__origin__" , field.type )
if origin_type is Union or (hasattr(__lowercase , "UnionType" ) and isinstance(__lowercase , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(__lowercase ) not in field.type.__args__
):
raise ValueError(
"Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
" the argument parser only supports one type per argument."
f" Problem encountered in field '{field.name}'." )
if type(__lowercase ) not in field.type.__args__:
# filter `str` in Union
snake_case_ = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
snake_case_ = getattr(field.type , "__origin__" , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
snake_case_ = (
field.type.__args__[0] if isinstance(__lowercase , field.type.__args__[1] ) else field.type.__args__[1]
)
snake_case_ = getattr(field.type , "__origin__" , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
snake_case_ = {}
if origin_type is Literal or (isinstance(field.type , __lowercase ) and issubclass(field.type , __lowercase )):
if origin_type is Literal:
snake_case_ = field.type.__args__
else:
snake_case_ = [x.value for x in field.type]
snake_case_ = make_choice_type_function(kwargs["choices"] )
if field.default is not dataclasses.MISSING:
snake_case_ = field.default
else:
snake_case_ = True
elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
snake_case_ = copy(__lowercase )
# Hack because type=bool in argparse does not behave as we want.
snake_case_ = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
snake_case_ = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
snake_case_ = default
# This tells argparse we accept 0 or 1 value after --field_name
snake_case_ = "?"
# This is the value that will get picked if we do --field_name (without value)
snake_case_ = True
elif isclass(__lowercase ) and issubclass(__lowercase , __lowercase ):
snake_case_ = field.type.__args__[0]
snake_case_ = "+"
if field.default_factory is not dataclasses.MISSING:
snake_case_ = field.default_factory()
elif field.default is dataclasses.MISSING:
snake_case_ = True
else:
snake_case_ = field.type
if field.default is not dataclasses.MISSING:
snake_case_ = field.default
elif field.default_factory is not dataclasses.MISSING:
snake_case_ = field.default_factory()
else:
snake_case_ = True
parser.add_argument(__lowercase , *__lowercase , **__lowercase )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
snake_case_ = False
parser.add_argument(f"--no_{field.name}" , action="store_false" , dest=field.name , **__lowercase )
    def _add_dataclass_arguments(self, dtype: DataClassType):
"""simple docstring"""
if hasattr(__lowercase , "_argument_group_name" ):
snake_case_ = self.add_argument_group(dtype._argument_group_name )
else:
snake_case_ = self
try:
snake_case_ = get_type_hints(__lowercase )
except NameError:
raise RuntimeError(
f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
"removing line of `from __future__ import annotations` which opts in Postponed "
"Evaluation of Annotations (PEP 563)" )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__lowercase ):
snake_case_ = ".".join(map(__lowercase , sys.version_info[:3] ) )
raise RuntimeError(
f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
"line of `from __future__ import annotations` which opts in union types as "
"`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
"support Python versions that lower than 3.10, you need to use "
"`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
"`X | None`." ) from ex
raise
for field in dataclasses.fields(__lowercase ):
if not field.init:
continue
snake_case_ = type_hints[field.name]
self._parse_dataclass_field(__lowercase , __lowercase )
    def parse_args_into_dataclasses(self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None, ):
        """simple docstring"""
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)
                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])
            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")
            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False):
        """simple docstring"""
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False):
        """simple docstring"""
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False):
        """simple docstring"""
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
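A hedged usage sketch: each dataclass field becomes a CLI flag, and parse_args_into_dataclasses returns one populated instance per dataclass. The TrainingArgs dataclass here is a hypothetical example, not part of the library.
from dataclasses import dataclass

@dataclass
class TrainingArgs:
    learning_rate: float = 3e-4
    epochs: int = 10
    debug: bool = False

parser = HfArgumentParser(TrainingArgs)
(train_args,) = parser.parse_args_into_dataclasses(["--learning_rate", "1e-3", "--epochs", "5"])
print(train_args.learning_rate, train_args.epochs, train_args.debug)  # 0.001 5 False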
| 187
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
def get_deta_config(model_name: str) -> DetaConfig:
    backbone_config = SwinConfig(
        embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=['stage2', 'stage3', 'stage4'], )
    config = DetaConfig(
        backbone_config=backbone_config, num_queries=900, encoder_ffn_dim=2048, decoder_ffn_dim=2048, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True, )
    # set labels
    repo_id = 'huggingface/label-files'
    if "o365" in model_name:
        num_labels = 366
        filename = 'object365-id2label.json'
    else:
        num_labels = 91
        filename = 'coco-detection-id2label.json'
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset')), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.reduction.weight', f'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.weight', f'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.bias', f'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', f'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', f'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', f'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', f'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.weight', f'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.bias', f'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.weight', f'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.bias', f'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', f'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', f'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', f'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', f'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', f'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', f'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight')
            in_proj_bias = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias')
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[:dim]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[dim : dim * 2, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[dim : dim * 2]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[-dim:, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim:]
# fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'model.decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:hidden_size, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[f'model.decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'model.decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size:, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id='nielsr/deta-checkpoints', filename='adet_swin_ft.pth')
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365', filename='deta_swin_pt_o365.pth')
    else:
        raise ValueError(f'Model name {model_name} not supported')
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
# original state dict
for name, param in state_dict.items():
        print(name, param.shape)
# rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
# fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val
# finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model.to(device)
    # load image processor
    processor = DetaImageProcessor(format='coco_detection')
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values.to(device))
# verify logits
print('Logits:' , outputs.logits[0, :3, :3] )
print('Boxes:' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]])
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]])
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
print('Everything ok!' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' )
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
# Push to hub
if push_to_hub:
print('Pushing model and processor to hub...' )
model.push_to_hub(f'jozhang97/{model_name}' )
processor.push_to_hub(f'jozhang97/{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 190
|
'''simple docstring'''
import math
def proth(number: int) -> int:
    if not isinstance(number, int):
        error_message = f'Input value of [number={number}] must be an integer'
        raise TypeError(error_message)
    if number < 1:
        error_message = f'Input value of [number={number}] must be > 0'
        raise ValueError(error_message)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3, 2)) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
        value = 0
        try:
            value = proth(number)
except ValueError:
print(f"""ValueError: there is no {number}th Proth number""")
continue
print(f"""The {number}th Proth number: {value}""")
| 190
| 1
|
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = """https://api.github.com"""
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + """/user"""
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("""USER_TOKEN""", """""")
def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """simple docstring"""
    headers = {
        'Authorization': f'''token {auth_token}''',
        'Accept': 'application/vnd.github.v3+json',
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'''{key}: {value}''')
else:
raise ValueError("""'USER_TOKEN' field cannot be empty.""")
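A hedged run sketch: export a personal access token as USER_TOKEN first, then the authenticated-user endpoint returns profile fields such as "login" and "id".
# assumes USER_TOKEN was exported in the environment beforehand
info = fetch_github_info(USER_TOKEN)
print(info.get("login"), info.get("id"))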
| 307
|
import os
def solution() -> str:
    """simple docstring"""
    file_path = os.path.join(os.path.dirname(__file__), 'num.txt')
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
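The same idea on inline data (the solution reads the 50-digit numbers from num.txt): Python's arbitrary-precision integers do the heavy lifting, and only the first ten digits of the sum are kept. The two values below are the first two numbers from the Project Euler 13 statement.
nums = [
    37107287533902102798797998220837590246510135740250,
    46376937677490009712648124896970078050417018260538,
]
print(str(sum(nums))[:10])  # 8348422521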
| 307
| 1
|
UNIT_SYMBOL = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor (meter)
METRIC_CONVERSION = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 12,
'Pm': 15,
'Em': 18,
'Zm': 21,
'Ym': 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    '''simple docstring'''
    from_sanitized = from_type.lower().strip('''s''')
    to_sanitized = to_type.lower().strip('''s''')
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f'Invalid \'from_type\' value: {from_type!r}.\n'
            f'Conversion abbreviations are: {", ".join(METRIC_CONVERSION)}'
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f'Invalid \'to_type\' value: {to_type!r}.\n'
            f'Conversion abbreviations are: {", ".join(METRIC_CONVERSION)}'
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
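Hedged examples, assuming the cleaned-up name length_conversion: both full unit names and symbols are accepted, and a trailing "s" is stripped.
print(length_conversion(4, "meters", "kilometers"))  # 0.004
print(length_conversion(3, "km", "m"))               # 3000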
| 358
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_yolos"] = ['YolosFeatureExtractor']
    _import_structure["image_processing_yolos"] = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 179
| 0
|
"""simple docstring"""
def mf_knapsack(i, wt, val, j):
    '''simple docstring'''
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j), mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1], )
        f[i][j] = val
    return f[i][j]
def knapsack(w, wt, val, n):
    '''simple docstring'''
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp
def knapsack_with_example_solution(w, wt, val):
    '''simple docstring'''
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            """Both the weights and values vectors must be either lists or tuples""")
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            """The number of weights must be the same as the number of values.\n"""
            f"""But got {num_items} weights and {len(val)} values"""
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                """All weights must be integers but got weight of """
                f"""type {type(wt[i])} at index {i}"""
            )
            raise TypeError(msg)
    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set
def _construct_solution(dp, wt, i, j, optimal_set):
    '''simple docstring'''
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("optimal_value = ", optimal_solution)
print("An optimal subset corresponding to the optimal value", optimal_subset)
| 165
|
def longest_distance(graph) -> None:
    """simple docstring"""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)
    print(max(long_dist))
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
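For the adjacency list above the longest path by vertex count is 0 -> 2 -> 5 -> 6 -> 7, so the call prints 5. A second tiny check, assuming the cleaned-up name longest_distance:
longest_distance({0: [1], 1: [2], 2: []})  # prints 3 (path 0 -> 1 -> 2)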
| 14
| 0
|
'''simple docstring'''
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    """simple docstring"""
    def __init__(self):
        '''simple docstring'''
        self.process = psutil.Process()
        self.peak_monitoring = False
    def peak_monitor(self):
        '''simple docstring'''
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break
    def start(self):
        '''simple docstring'''
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()
    def stop(self):
        '''simple docstring'''
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
cpu_peak_tracker = PeakCPUMemory()
def start_measure():
    """simple docstring"""
    measures = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()
    return measures
def end_measure(start_measures):
    """simple docstring"""
    measures = {"time": time.time() - start_measures["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
    return measures
def log_measures(measures, description):
    """simple docstring"""
    print(f'{description}:')
    print(f'- Time: {measures["time"]:.2f}s')
    for i in range(torch.cuda.device_count()):
        print(f'- GPU {i} allocated: {measures[str(i)]:.2f}MiB')
        peak = measures[f'{i}-peak']
        print(f'- GPU {i} peak: {peak:.2f}MiB')
    print(f'- CPU RAM allocated: {measures["cpu"]:.2f}MiB')
    print(f'- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB')
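A hedged usage sketch, assuming the de-obfuscated names start_measure, end_measure, and log_measures above: bracket a workload with the two calls, then print the deltas.
measures = start_measure()
_ = torch.randn(1024, 1024) @ torch.randn(1024, 1024)  # stand-in workload
stats = end_measure(measures)
log_measures(stats, "matmul benchmark")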
| 353
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = MBartaaTokenizer
    rust_tokenizer_class = MBartaaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def a ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB, src_lang='en_XX', tgt_lang='ro_RO', keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname )
def a ( self ):
'''simple docstring'''
        token = '<s>'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def a ( self ):
'''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-1], '<mask>')
        self.assertEqual(len(vocab_keys), 1054)
def a ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1054 )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = MBartaaTokenizer(snake_case__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=snake_case__ )
_lowerCAmelCase : Any = tokenizer.tokenize('This is a test' )
self.assertListEqual(snake_case__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_lowerCAmelCase : Tuple = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
_lowerCAmelCase : Optional[int] = tokenizer.convert_tokens_to_ids(snake_case__ )
self.assertListEqual(
snake_case__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowerCAmelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(snake_case__ )
self.assertListEqual(
snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = {'input_ids': [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250038)

    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250053, 250001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]

    @require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[250004, 62, 3034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
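# A minimal sketch of the `shift_tokens_right` helper these tests exercise
# (assumption: MBart-style semantics, where the last non-pad token -- the <eos> --
# is wrapped around to position 0 to become the decoder start token). The function
# name is illustrative and not part of the library under test.
import torch


def shift_tokens_right_sketch(input_ids, pad_token_id):
    prev_output_tokens = input_ids.clone()
    # position of the last non-pad token in each row
    index_of_eos = (prev_output_tokens.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    decoder_start_tokens = prev_output_tokens.gather(1, index_of_eos).squeeze()
    prev_output_tokens[:, 1:] = prev_output_tokens[:, :-1].clone()
    prev_output_tokens[:, 0] = decoder_start_tokens
    return prev_output_tokens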
| 25
| 0
|
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    return two_pound(x)
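# An equivalent iterative sketch (assumption: same UK coin set and the usual
# "count the combinations" dynamic programme); it avoids the deep recursion of
# the chained functions above for large targets.
def solution_dp(pence: int = 200) -> int:
    coins = (1, 2, 5, 10, 20, 50, 100, 200)
    ways = [1] + [0] * pence  # ways[0] = 1: the empty combination
    for coin in coins:
        for amount in range(coin, pence + 1):
            ways[amount] += ways[amount - coin]
    return ways[pence]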
if __name__ == "__main__":
print(solution(int(input().strip())))
| 182
|
"""simple docstring"""
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")

    start_index = 0 if use_pascal else 1

    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]

    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
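# Usage sketch for the converter above (expected outputs follow the usual
# camel/pascal conventions):
#   snake_to_camel_case("some_random_string")        -> "someRandomString"
#   snake_to_camel_case("some_random_string", True)  -> "SomeRandomString"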
| 292
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') )
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') )
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') )
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image from the COCO validation set
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
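# Example invocation (the script filename is hypothetical; the flags are the ones
# declared by the argparse block above):
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base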
| 353
|
"""simple docstring"""
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list: list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
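# Further usage sketches (outputs follow the interleaving rule implemented above):
#   alternative_string_arrange("AB", "XYZ")  -> "AXBYZ"
#   alternative_string_arrange("ABCD", "XY") -> "AXBYCD"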
| 239
| 0
|
'''simple docstring'''
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['T@@', 'i', 'I', 'R@@', 'r', 'e@@']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l à</w>']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])

        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            for token in vocab_tokens:
                fp.write(f'{token} {vocab_tokens[token]}\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'Tôi là VinAI Research'
        output_text = 'T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'Tôi là VinAI Research'
        bpe_tokens = 'T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'.split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
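# Note on the `@@` markers used above (subword-nmt style BPE): a token ending in
# "@@" continues into the next token, so "T@@ ô@@ i" detokenizes back to "Tôi".
# Tokens absent from the tiny test vocab are mapped to the <unk> id (3 here).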
| 190
|
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            'test',
            'question',
            'this',
            'is',
            'the',
            'first',
            'second',
            'third',
            'fourth',
            'fifth',
            'record',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, 'realm_tokenizer')
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, 'realm_block_records')
        os.makedirs(realm_block_records_path, exist_ok=True)

    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, 'realm_tokenizer'))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                'id': ['0', '1'],
                'question': ['foo', 'bar'],
                'answers': [['Foo', 'Bar'], ['Bar']],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b'This is the first record',
                b'This is the second record',
                b'This is the third record',
                b'This is the fourth record',
                b'This is the fifth record',
                b'This is a longer longer longer record',
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever

    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype='long')
        question_input_ids = tokenizer(['Test question']).input_ids
        answer_ids = tokenizer(
            ['the fourth'],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors='np'
        )

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'],
        )

    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype='long')
        question_input_ids = tokenizer(['Test question']).input_ids
        answer_ids = tokenizer(
            ['the fourth', 'longer longer'],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors='np'
        )

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)

    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, 'realm_block_records'))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, 'realm_block_records'))
        self.assertEqual(retriever.block_records[0], b'This is the first record')

        # Test mocked remote path
        with patch('transformers.models.realm.retrieval_realm.hf_hub_download') as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, 'realm_block_records'), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa')

        self.assertEqual(retriever.block_records[0], b'This is the first record')
| 190
| 1
|
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class Test(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
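# A minimal sketch of the `knapsack` function these tests assume (0/1 knapsack,
# recursive; the signature knapsack(capacity, weights, values, counter) matches
# the calls above). Treat it as an illustration, not the tested module itself.
def knapsack(capacity: int, weights: list, values: list, counter: int) -> int:
    # Base case: no items left or no capacity left
    if counter == 0 or capacity == 0:
        return 0
    # If the item's weight exceeds the remaining capacity, skip it
    if weights[counter - 1] > capacity:
        return knapsack(capacity, weights, values, counter - 1)
    # Otherwise take the better of including or excluding the item
    return max(
        values[counter - 1] + knapsack(capacity - weights[counter - 1], weights, values, counter - 1),
        knapsack(capacity, weights, values, counter - 1),
    )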
| 226
|
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(F'''Task {task} not supported.''')

    print(F'''Building PyTorch model from configuration: {config}''')
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(F'''Save PyTorch model to {pytorch_dump_path}''')
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(F'''Save tokenizer files to {pytorch_dump_path}''')
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 226
| 1
|
import torch
from diffusers import DiffusionPipeline
class CustomPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        model_output = self.unet(sample, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, sample).prev_sample

        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)

        return result
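# Note: `scheduler_output - scheduler_output + torch.ones_like(scheduler_output)`
# deliberately yields a tensor of ones with the same shape/dtype/device as the
# scheduler output; pipelines like this are typically used only to exercise the
# DiffusionPipeline plumbing in tests rather than to generate real samples.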
| 48
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = """microsoft/speecht5_tts"""
    description = (
        """This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """
        """text to read (in English) and returns a waveform object containing the sound."""
    )
    name = """text_reader"""
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["""text"""]
    outputs = ["""audio"""]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = '''microsoft/speecht5_hifigan'''
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors='''pt''', truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError('''Datasets needs to be installed if not passing speaker embeddings.''')

            embeddings_dataset = load_dataset('''Matthijs/cmu-arctic-xvectors''', split='''validation''')
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]['''xvector''']).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
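# Usage sketch (assumes the transformers agent tooling and its dependencies are
# installed; `PipelineTool` subclasses are callable):
#   tool = TextToSpeechTool()
#   audio = tool("Hello, world")  # waveform tensor; pass speaker_embeddings to pick a voice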
| 179
| 0
|
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
UpperCAmelCase__ = """src/transformers"""
# Matches is_xxx_available()
UpperCAmelCase__ = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
UpperCAmelCase__ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
UpperCAmelCase__ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
UpperCAmelCase__ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
UpperCAmelCase__ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
UpperCAmelCase__ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
UpperCAmelCase__ = re.compile("""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
UpperCAmelCase__ = re.compile("""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
UpperCAmelCase__ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
UpperCAmelCase__ = re.compile(r"""^\s*try:""")
# Catches a line with else:
UpperCAmelCase__ = re.compile(r"""^\s*else:""")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """Read an init file and parse (per backend) the `_import_structure` objects and the `TYPE_CHECKING` objects."""
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between the `_import_structure` objects and the `TYPE_CHECKING` objects."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''')
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''')

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f'''{key} backend'''
            errors.append(f'''Differences for {name}:''')
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f'''  {a} in TYPE_HINT but not in _import_structure.''')
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f'''  {a} in _import_structure but not in TYPE_HINT.''')
    return errors
def check_all_inits():
    """Check all inits in the repo define the same objects in both halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f'''- {module}''' for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f'''{list_of_modules}\n'''
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
check_all_inits()
check_submodules()
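# Worked example of `find_backend` (using the regexes above): on the line
#   "    if not is_torch_available() and not is_tf_available():"
# it extracts ["torch", "tf"], sorts them, and returns "tf_and_torch", which is
# then used as the backend key in the parsed import structures.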
| 369
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """Visual-Attention-Network/van-base""": (
        """https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
    ),
}
class VanConfig(PretrainedConfig):
    model_type = 'van'

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 30
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_nllb_moe''': [
'''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''NllbMoeConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
'''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NllbMoeForConditionalGeneration''',
'''NllbMoeModel''',
'''NllbMoePreTrainedModel''',
'''NllbMoeTop2Router''',
'''NllbMoeSparseMLP''',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
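# How the lazy init above behaves: at import time the module object is replaced by
# a _LazyModule proxy, so the heavy torch-backed symbols in _import_structure are
# only imported on first attribute access, while `if TYPE_CHECKING:` gives static
# type checkers the real imports.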
| 69
|
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


T5_TINY = 'patrickvonplaten/t5-tiny-random'
BART_TINY = 'sshleifer/bart-tiny-random'
MBART_TINY = 'sshleifer/tiny-mbart'

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()

        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    # test one model to quickly (without @slow) sanity check that the code works
    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
| 25
| 0
|
def A__ ( lowerCamelCase , lowerCamelCase ) -> list:
UpperCamelCase_: Optional[int] = word.split()
def justify(lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> str:
UpperCamelCase_: Tuple = max_width - width
UpperCamelCase_: Optional[Any] = len(lowerCamelCase )
if len(lowerCamelCase ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
UpperCamelCase_: List[Any] = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
UpperCamelCase_: Optional[Any] = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
UpperCamelCase_: List[str] = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(lowerCamelCase ):
num_spaces_between_words_list[i] += 1
UpperCamelCase_: Dict = []
for i in range(lowerCamelCase ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * """ """ )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(lowerCamelCase )
UpperCamelCase_: Optional[int] = []
UpperCamelCase_: list[str] = []
UpperCamelCase_: List[str] = 0
for word in words:
if width + len(lowerCamelCase ) + len(lowerCamelCase ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = minimum number of spaces needed between the words already on the line
line.append(lowerCamelCase )
width += len(lowerCamelCase )
else:
# justify the line and add it to result
answer.append(justify(lowerCamelCase , lowerCamelCase , lowerCamelCase ) )
# reset new line and new width
UpperCamelCase_, UpperCamelCase_: List[str] = [word], len(lowerCamelCase )
UpperCamelCase_: List[str] = max_width - width - len(lowerCamelCase )
answer.append(""" """.join(lowerCamelCase ) + (remaining_spaces + 1) * """ """ )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
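# Hedged usage sketch (added; `A__` is this file's mangled name for the
# justification routine above):
#   A__("This is an example of text justification.", 16)
#   -> ['This    is    an', 'example  of text', 'justification.  ']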
| 223
|
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ : Union[str, Any] = logging.get_logger(__name__)
set_seed(7_70)
lowerCamelCase_ : str = {
"""c_attn""": """att_proj""",
"""c_proj""": """out_proj""",
"""c_fc""": """in_proj""",
"""transformer.""": """""",
"""h.""": """layers.""",
"""ln_1""": """layernorm_1""",
"""ln_2""": """layernorm_2""",
"""ln_f""": """layernorm_final""",
"""wpe""": """position_embeds_layer""",
"""wte""": """input_embeds_layer""",
}
lowerCamelCase_ : Any = {
"""text_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text.pt""",
},
"""coarse_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse.pt""",
},
"""fine_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine.pt""",
},
"""text""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text_2.pt""",
},
"""coarse""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse_2.pt""",
},
"""fine""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine_2.pt""",
},
}
lowerCamelCase_ : str = os.path.dirname(os.path.abspath(__file__))
lowerCamelCase_ : Any = os.path.join(os.path.expanduser("""~"""), """.cache""")
lowerCamelCase_ : Dict = os.path.join(os.getenv("""XDG_CACHE_HOME""", default_cache_dir), """suno""", """bark_v0""")
def A__ ( lowerCamelCase , lowerCamelCase=False ) -> int:
UpperCamelCase_: Union[str, Any] = model_type
if use_small:
key += "_small"
return os.path.join(lowerCamelCase , REMOTE_MODEL_PATHS[key]["""file_name"""] )
def A__ ( lowerCamelCase , lowerCamelCase ) -> Tuple:
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
hf_hub_download(repo_id=lowerCamelCase , filename=lowerCamelCase , local_dir=lowerCamelCase )
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase=False , lowerCamelCase="text" ) -> Optional[int]:
if model_type == "text":
UpperCamelCase_: str = BarkSemanticModel
UpperCamelCase_: Dict = BarkSemanticConfig
UpperCamelCase_: int = BarkSemanticGenerationConfig
elif model_type == "coarse":
UpperCamelCase_: str = BarkCoarseModel
UpperCamelCase_: int = BarkCoarseConfig
UpperCamelCase_: Any = BarkCoarseGenerationConfig
elif model_type == "fine":
UpperCamelCase_: Optional[Any] = BarkFineModel
UpperCamelCase_: int = BarkFineConfig
UpperCamelCase_: Dict = BarkFineGenerationConfig
else:
raise NotImplementedError()
UpperCamelCase_: str = F'''{model_type}_small''' if use_small else model_type
UpperCamelCase_: List[Any] = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(lowerCamelCase ):
logger.info(F'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' )
_download(model_info["""repo_id"""] , model_info["""file_name"""] )
UpperCamelCase_: int = torch.load(lowerCamelCase , map_location=lowerCamelCase )
# this is a hack
UpperCamelCase_: Tuple = checkpoint["""model_args"""]
if "input_vocab_size" not in model_args:
UpperCamelCase_: int = model_args["""vocab_size"""]
UpperCamelCase_: Optional[int] = model_args["""vocab_size"""]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
UpperCamelCase_: Tuple = model_args.pop("""n_head""" )
UpperCamelCase_: Dict = model_args.pop("""n_embd""" )
UpperCamelCase_: List[str] = model_args.pop("""n_layer""" )
UpperCamelCase_: Optional[Any] = ConfigClass(**checkpoint["""model_args"""] )
UpperCamelCase_: Optional[Any] = ModelClass(config=lowerCamelCase )
UpperCamelCase_: List[Any] = GenerationConfigClass()
UpperCamelCase_: Optional[Any] = model_generation_config
UpperCamelCase_: Optional[int] = checkpoint["""model"""]
# fixup checkpoint
UpperCamelCase_: Dict = """_orig_mod."""
for k, v in list(state_dict.items() ):
if k.startswith(lowerCamelCase ):
# replace part of the key with corresponding layer name in HF implementation
UpperCamelCase_: Optional[int] = k[len(lowerCamelCase ) :]
for old_layer_name in new_layer_name_dict:
UpperCamelCase_: Dict = new_k.replace(lowerCamelCase , new_layer_name_dict[old_layer_name] )
UpperCamelCase_: List[str] = state_dict.pop(lowerCamelCase )
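# Illustration (added): a torch.compile-style checkpoint key such as
#   "_orig_mod.transformer.h.0.attn.c_attn.weight"
# has its "_orig_mod." prefix stripped and is then remapped through
# new_layer_name_dict to the HF name
#   "layers.0.attn.att_proj.weight"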
UpperCamelCase_: Optional[int] = set(state_dict.keys() ) - set(model.state_dict().keys() )
UpperCamelCase_: Dict = {k for k in extra_keys if not k.endswith(""".attn.bias""" )}
UpperCamelCase_: Optional[Any] = set(model.state_dict().keys() ) - set(state_dict.keys() )
UpperCamelCase_: Union[str, Any] = {k for k in missing_keys if not k.endswith(""".attn.bias""" )}
if len(lowerCamelCase ) != 0:
raise ValueError(F'''extra keys found: {extra_keys}''' )
if len(lowerCamelCase ) != 0:
raise ValueError(F'''missing keys: {missing_keys}''' )
model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
UpperCamelCase_: str = model.num_parameters(exclude_embeddings=lowerCamelCase )
UpperCamelCase_: int = checkpoint["""best_val_loss"""].item()
logger.info(F'''model loaded: {round(n_params/1E6 , 1 )}M params, {round(lowerCamelCase , 3 )} loss''' )
model.eval()
model.to(lowerCamelCase )
del checkpoint, state_dict
return model
def A__ ( lowerCamelCase , lowerCamelCase=False , lowerCamelCase="text" ) -> Any:
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
UpperCamelCase_: Union[str, Any] = """cpu""" # do conversion on cpu
UpperCamelCase_: int = _get_ckpt_path(lowerCamelCase , use_small=lowerCamelCase )
UpperCamelCase_: Dict = _load_model(lowerCamelCase , lowerCamelCase , model_type=lowerCamelCase , use_small=lowerCamelCase )
# load bark initial model
UpperCamelCase_: List[Any] = _bark_load_model(lowerCamelCase , """cpu""" , model_type=lowerCamelCase , use_small=lowerCamelCase )
if model_type == "text":
UpperCamelCase_: Tuple = bark_model["""model"""]
if model.num_parameters(exclude_embeddings=lowerCamelCase ) != bark_model.get_num_params():
raise ValueError("""initial and new models don't have the same number of parameters""" )
# check if same output as the bark model
UpperCamelCase_: Optional[Any] = 5
UpperCamelCase_: List[str] = 10
if model_type in ["text", "coarse"]:
UpperCamelCase_: int = torch.randint(2_56 , (batch_size, sequence_length) , dtype=torch.int )
UpperCamelCase_: Tuple = bark_model(lowerCamelCase )[0]
UpperCamelCase_: Optional[Any] = model(lowerCamelCase )
# take last logits
UpperCamelCase_: Union[str, Any] = output_new_model_total.logits[:, [-1], :]
else:
UpperCamelCase_: Tuple = 3
UpperCamelCase_: List[Any] = 8
UpperCamelCase_: List[str] = torch.randint(2_56 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
UpperCamelCase_: int = model(lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Any = bark_model(lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Optional[int] = output_new_model_total.logits
# any output difference should come from differences in the self-attention implementation
if output_new_model.shape != output_old_model.shape:
raise ValueError("""initial and new outputs don't have the same shape""" )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError("""initial and new outputs are not equal""" )
Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
model.save_pretrained(lowerCamelCase )
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> str:
UpperCamelCase_: List[str] = os.path.join(lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Optional[int] = BarkSemanticConfig.from_pretrained(os.path.join(lowerCamelCase , """config.json""" ) )
UpperCamelCase_: List[Any] = BarkCoarseConfig.from_pretrained(os.path.join(lowerCamelCase , """config.json""" ) )
UpperCamelCase_: Optional[int] = BarkFineConfig.from_pretrained(os.path.join(lowerCamelCase , """config.json""" ) )
UpperCamelCase_: Any = EncodecConfig.from_pretrained("""facebook/encodec_24khz""" )
UpperCamelCase_: Optional[Any] = BarkSemanticModel.from_pretrained(lowerCamelCase )
UpperCamelCase_: Tuple = BarkCoarseModel.from_pretrained(lowerCamelCase )
UpperCamelCase_: List[str] = BarkFineModel.from_pretrained(lowerCamelCase )
UpperCamelCase_: Tuple = EncodecModel.from_pretrained("""facebook/encodec_24khz""" )
UpperCamelCase_: int = BarkConfig.from_sub_model_configs(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Optional[int] = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
UpperCamelCase_: Optional[Any] = BarkModel(lowerCamelCase )
UpperCamelCase_: int = semantic
UpperCamelCase_: Tuple = coarseAcoustic
UpperCamelCase_: Optional[int] = fineAcoustic
UpperCamelCase_: Any = codec
UpperCamelCase_: Dict = bark_generation_config
Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
bark.save_pretrained(lowerCamelCase , repo_id=lowerCamelCase , push_to_hub=lowerCamelCase )
if __name__ == "__main__":
lowerCamelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""model_type""", type=str, help="""text, coarse or fine.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--is_small""", action="""store_true""", help="""convert the small version instead of the large.""")
lowerCamelCase_ : Dict = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
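# Hedged usage sketch (added; the script file name is illustrative):
#   python convert_bark_checkpoint.py text ./bark-semantic --is_small
# downloads text.pt from the suno/bark repo if needed and writes an HF
# BarkSemanticModel to ./bark-semantic.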
| 223
| 1
|
'''simple docstring'''
import unittest
import numpy as np
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , ) -> np.ndarray:
lowerCamelCase__ : Tuple = np.shape(UpperCamelCase )
lowerCamelCase__ : Optional[int] = np.shape(UpperCamelCase )
lowerCamelCase__ : List[Any] = np.shape(UpperCamelCase )
if shape_a[0] != shape_b[0]:
lowerCamelCase__ : List[str] = (
"""Expected the same number of rows for A and B. """
f'''Instead found A of size {shape_a} and B of size {shape_b}'''
)
raise ValueError(UpperCamelCase )
if shape_b[1] != shape_c[1]:
lowerCamelCase__ : Tuple = (
"""Expected the same number of columns for B and C. """
f'''Instead found B of size {shape_b} and C of size {shape_c}'''
)
raise ValueError(UpperCamelCase )
lowerCamelCase__ : Optional[Any] = pseudo_inv
if a_inv is None:
try:
lowerCamelCase__ : str = np.linalg.inv(UpperCamelCase )
except np.linalg.LinAlgError:
raise ValueError(
"""Input matrix A is not invertible. Cannot compute Schur complement.""" )
return mat_c - mat_b.T @ a_inv @ mat_b
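# Note (added): this returns the Schur complement S = C - B^T A^{-1} B, which
# satisfies the block-determinant identity
#   det([[A, B], [B^T, C]]) = det(A) * det(S),
# exactly what the first test below verifies via np.block.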
class _lowercase ( unittest.TestCase ):
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : Optional[int] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowerCamelCase__ : List[Any] = np.array([[0, 3], [3, 0], [2, 3]] )
lowerCamelCase__ : Optional[Any] = np.array([[2, 1], [6, 3]] )
lowerCamelCase__ : Dict = schur_complement(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : Any = np.block([[a, b], [b.T, c]] )
lowerCamelCase__ : List[Any] = np.linalg.det(UpperCamelCase__ )
lowerCamelCase__ : List[Any] = np.linalg.det(UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = np.linalg.det(UpperCamelCase__ )
self.assertAlmostEqual(UpperCamelCase__ , det_a * det_s )
def lowerCamelCase_ ( self: Optional[Any] ):
lowerCamelCase__ : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowerCamelCase__ : Tuple = np.array([[0, 3], [3, 0], [2, 3]] )
lowerCamelCase__ : str = np.array([[2, 1], [6, 3]] )
with self.assertRaises(UpperCamelCase__ ):
schur_complement(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : Dict = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowerCamelCase__ : int = np.array([[0, 3], [3, 0], [2, 3]] )
lowerCamelCase__ : List[str] = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(UpperCamelCase__ ):
schur_complement(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
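# Minimal numeric sketch (added; uses plain numpy rather than the mangled
# function name above):
#   a = np.array([[4.0, 0.0], [0.0, 4.0]]); b = np.array([[1.0], [1.0]]); c = np.array([[3.0]])
#   s = c - b.T @ np.linalg.inv(a) @ b                  # -> [[2.5]]
#   np.linalg.det(np.block([[a, b], [b.T, c]]))         # -> 40.0 == det(a) * det(s) = 16.0 * 2.5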
| 41
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
_lowercase : int = None
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : Tuple = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
_lowercase : Dict = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
},
"tokenizer_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
},
}
_lowercase : int = {
"albert-base-v1": 512,
"albert-large-v1": 512,
"albert-xlarge-v1": 512,
"albert-xxlarge-v1": 512,
"albert-base-v2": 512,
"albert-large-v2": 512,
"albert-xlarge-v2": 512,
"albert-xxlarge-v2": 512,
}
_lowercase : Tuple = "▁"
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = AlbertTokenizer
def __init__( self : Optional[Any] , lowercase_ : Union[str, Any]=None , lowercase_ : str=None , lowercase_ : Any=True , lowercase_ : Optional[int]=True , lowercase_ : List[str]=False , lowercase_ : Optional[int]="[CLS]" , lowercase_ : Any="[SEP]" , lowercase_ : int="<unk>" , lowercase_ : Any="[SEP]" , lowercase_ : int="<pad>" , lowercase_ : Tuple="[CLS]" , lowercase_ : Dict="[MASK]" , **lowercase_ : Optional[int] , ):
# The mask token behaves like a normal word, i.e. it includes the space before it
# and is included in the raw text; there should be a match in a non-normalized sentence.
lowercase_ : Tuple = (
AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ , normalized=lowercase_ )
if isinstance(lowercase_ , lowercase_ )
else mask_token
)
super().__init__(
lowercase_ , tokenizer_file=lowercase_ , do_lower_case=lowercase_ , remove_space=lowercase_ , keep_accents=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , **lowercase_ , )
lowercase_ : Optional[int] = do_lower_case
lowercase_ : Any = remove_space
lowercase_ : Dict = keep_accents
lowercase_ : Optional[int] = vocab_file
lowercase_ : Any = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ):
lowercase_ : Tuple = [self.sep_token_id]
lowercase_ : str = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ):
lowercase_ : Union[str, Any] = [self.sep_token_id]
lowercase_ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
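# Intended semantics (added; the mangled names above collapse the two sequence
# arguments): for a pair [CLS] A1 A2 [SEP] B1 [SEP] this yields
# token_type_ids [0, 0, 0, 0, 1, 1].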
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str , lowercase_ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(lowercase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase_ : Optional[Any] = os.path.join(
lowercase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ):
copyfile(self.vocab_file , lowercase_ )
return (out_vocab_file,)
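# Hedged usage sketch (added; AlbertTokenizerFast is the upstream name of the
# fast tokenizer this class mirrors):
#   tok = AlbertTokenizerFast.from_pretrained("albert-base-v2")
#   tok.save_pretrained("./albert-tok")  # calls save_vocabulary above to copy spiece.model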
| 239
| 0
|
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
_lowercase : List[Any] = logging.getLogger(__name__)
_lowercase : List[Any] = 50 # max width of layer names
_lowercase : Tuple = 70 # max width of quantizer names
def lowerCamelCase__ ( A : List[Any] ):
'''simple docstring'''
UpperCAmelCase = parser.add_argument_group('''quant_trainer arguments''' )
group.add_argument('''--wprec''' , type=A , default=8 , help='''weight precision''' )
group.add_argument('''--aprec''' , type=A , default=8 , help='''activation precision''' )
group.add_argument('''--quant-per-tensor''' , action='''store_true''' , help='''per tensor weight scaling''' )
group.add_argument('''--quant-disable''' , action='''store_true''' , help='''disable all quantizers''' )
group.add_argument('''--quant-disable-embeddings''' , action='''store_true''' , help='''disable all embeddings quantizers''' )
group.add_argument('''--quant-disable-keyword''' , type=A , nargs='''+''' , help='''disable quantizers by keyword''' )
group.add_argument('''--quant-disable-layer-module''' , type=A , help='''disable quantizers by keyword under layer.''' )
group.add_argument('''--quant-enable-layer-module''' , type=A , help='''enable quantizers by keyword under layer''' )
group.add_argument('''--calibrator''' , default='''max''' , help='''which quantization range calibrator to use''' )
group.add_argument('''--percentile''' , default=A , type=A , help='''percentile for PercentileCalibrator''' )
group.add_argument('''--fuse-qkv''' , action='''store_true''' , help='''use the same scale factor for qkv''' )
group.add_argument('''--clip-gelu''' , metavar='''N''' , type=A , help='''clip gelu output maximum value to N''' )
group.add_argument(
'''--recalibrate-weights''' , action='''store_true''' , help=(
'''recalibrate weight amaxes by taking the max of the weights.'''
''' amaxes will be computed with the current quantization granularity (axis).'''
) , )
def lowerCamelCase__ ( A : int ):
'''simple docstring'''
if args.calibrator == "max":
UpperCAmelCase = '''max'''
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError('''Specify --percentile when using percentile calibrator''' )
UpperCAmelCase = '''histogram'''
elif args.calibrator == "mse":
UpperCAmelCase = '''histogram'''
else:
raise ValueError(f"""Invalid calibrator {args.calibrator}""" )
UpperCAmelCase = QuantDescriptor(num_bits=args.aprec , calib_method=A )
UpperCAmelCase = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(A )
quant_nn.QuantLinear.set_default_quant_desc_weight(A )
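# Hedged illustration (added): with e.g. --aprec 8 --wprec 8 and without
# --quant-per-tensor, the defaults set above amount to
#   input:  QuantDescriptor(num_bits=8, calib_method="max")  # or "histogram"
#   weight: QuantDescriptor(num_bits=8, axis=(0,))           # per output channel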
def lowerCamelCase__ ( A : List[Any] , A : Any , A : Union[str, Any]=False , A : int=False ):
'''simple docstring'''
logger.info('''Configuring Model for Quantization''' )
logger.info(f"""using quantization package {pytorch_quantization.__file__}""" )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(A , ['''embeddings'''] , which='''weight''' , _disabled=A )
if args.quant_disable:
set_quantizer_by_name(A , [''''''] , _disabled=A )
if args.quant_disable_keyword:
set_quantizer_by_name(A , args.quant_disable_keyword , _disabled=A )
if args.quant_disable_layer_module:
set_quantizer_by_name(A , [R'''layer.\d+.''' + args.quant_disable_layer_module] , _disabled=A )
if args.quant_enable_layer_module:
set_quantizer_by_name(A , [R'''layer.\d+.''' + args.quant_enable_layer_module] , _disabled=A )
if args.recalibrate_weights:
recalibrate_weights(A )
if args.fuse_qkv:
fuse_qkv(A , A )
if args.clip_gelu:
clip_gelu(A , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(A )
def lowerCamelCase__ ( A : Dict ):
'''simple docstring'''
logger.info('''Enabling Calibration''' )
for name, module in model.named_modules():
if name.endswith('''_quantizer''' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(f"""{name:80}: {module}""" )
def lowerCamelCase__ ( A : Tuple , A : str ):
'''simple docstring'''
logger.info('''Loading calibrated amax''' )
for name, module in model.named_modules():
if name.endswith('''_quantizer''' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('''percentile''' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(A )
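# Flow sketch (added): calibration typically runs as
#   1) configure the model with calib=True ("Configuring Model for Quantization"),
#   2) switch quantizers to calibration mode ("Enabling Calibration"),
#   3) forward a handful of calibration batches,
#   4) load the collected amax ranges and re-enable quantization
#      ("Loading calibrated amax"), as the function above does.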
def lowerCamelCase__ ( A : int , A : Optional[Any] ):
'''simple docstring'''
def fusea(A : Union[str, Any] , A : Optional[Any] , A : str ):
for mod in [qq, qk, qv]:
if not hasattr(A , '''_amax''' ):
print(''' WARNING: NO AMAX BUFFER''' )
return
UpperCAmelCase = qq._amax.detach().item()
UpperCAmelCase = qk._amax.detach().item()
UpperCAmelCase = qv._amax.detach().item()
UpperCAmelCase = max(A , A , A )
qq._amax.fill_(A )
qk._amax.fill_(A )
qv._amax.fill_(A )
logger.info(f""" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}""" )
for name, mod in model.named_modules():
if name.endswith('''.attention.self''' ):
logger.info(f"""FUSE_QKV: {name:{name_width}}""" )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def lowerCamelCase__ ( A : int , A : Tuple ):
'''simple docstring'''
for name, mod in model.named_modules():
if name.endswith('''.output.dense''' ) and not name.endswith('''attention.output.dense''' ):
UpperCAmelCase = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=A )
UpperCAmelCase = mod._input_quantizer._amax.data.detach().item()
logger.info(f"""CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}""" )
def lowerCamelCase__ ( A : Optional[Any] ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(A , '''_weight_quantizer''' ) and mod._weight_quantizer.axis is not None:
UpperCAmelCase = mod.weight.shape[0]
UpperCAmelCase = mod._weight_quantizer._amax.detach()
UpperCAmelCase = torch.ones(A , dtype=amax.dtype , device=amax.device ) * amax
print(f"""expanding {name} {amax} -> {mod._weight_quantizer._amax}""" )
def lowerCamelCase__ ( A : Union[str, Any] ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(A , '''_weight_quantizer''' ):
if not hasattr(mod._weight_quantizer , '''_amax''' ):
print(f"""RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER""" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
UpperCAmelCase = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
UpperCAmelCase = set(range(len(mod.weight.size() ) ) ) - axis_set
UpperCAmelCase = pytorch_quantization.utils.reduce_amax(mod.weight , axis=A , keepdims=A ).detach()
logger.info(f"""RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}""" )
UpperCAmelCase = amax
def lowerCamelCase__ ( A : List[str] , A : Union[str, Any]=25 , A : Tuple=1_80 , A : List[Any]=None ):
'''simple docstring'''
if ignore is None:
UpperCAmelCase = []
elif not isinstance(A , A ):
UpperCAmelCase = [ignore]
UpperCAmelCase = 0
for name, mod in model.named_modules():
if not hasattr(A , '''weight''' ):
continue
UpperCAmelCase = max(A , len(A ) )
for name, mod in model.named_modules():
UpperCAmelCase = getattr(A , '''_input_quantizer''' , A )
UpperCAmelCase = getattr(A , '''_weight_quantizer''' , A )
if not hasattr(A , '''weight''' ):
continue
if type(A ) in ignore:
continue
if [True for s in ignore if type(A ) is str and s in name]:
continue
UpperCAmelCase = f"""Act:{input_q.extra_repr()}"""
UpperCAmelCase = f"""Wgt:{weight_q.extra_repr()}"""
UpperCAmelCase = f"""{name:{name_width}} {act_str} {wgt_str}"""
if len(A ) <= line_width:
logger.info(A )
else:
logger.info(f"""{name:{name_width}} {act_str}""" )
logger.info(f"""{" ":{name_width}} {wgt_str}""" )
def lowerCamelCase__ ( A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase = 0
for name, mod in model.named_modules():
if isinstance(A , pytorch_quantization.nn.TensorQuantizer ):
print(f"""{name:80} {mod}""" )
count += 1
print(f"""{count} TensorQuantizers found in model""" )
def lowerCamelCase__ ( A : Any , A : Any , A : Dict , A : List[str] , A : Any ):
'''simple docstring'''
UpperCAmelCase = getattr(A , A , A )
if quantizer_mod is not None:
assert hasattr(A , A )
setattr(A , A , A )
else:
logger.warning(f"""{name} has no {quantizer}""" )
def lowerCamelCase__ ( A : Dict , A : List[str] , A : Optional[Any]="both" , **A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = f"""Warning: changing {which} quantizers of {name:{qname_width}}"""
for k, v in kwargs.items():
s += f""" {k}={v}"""
if which in ["input", "both"]:
set_quantizer(A , A , '''_input_quantizer''' , A , A )
if which in ["weight", "both"]:
set_quantizer(A , A , '''_weight_quantizer''' , A , A )
logger.info(A )
def lowerCamelCase__ ( A : Any , A : Tuple , **A : Optional[int] ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(A , '''_input_quantizer''' ) or hasattr(A , '''_weight_quantizer''' ):
for n in names:
if re.search(A , A ):
set_quantizers(A , A , **A )
elif name.endswith('''_quantizer''' ):
for n in names:
if re.search(A , A ):
UpperCAmelCase = f"""Warning: changing {name:{name_width}}"""
for k, v in kwargs.items():
s += f""" {k}={v}"""
setattr(A , A , A )
logger.info(A )
| 367
|
'''simple docstring'''
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
_lowercase : List[str] = """0.12""" # assumed parallelism: 8
if is_torch_available():
import torch
def lowerCamelCase__ ( A : str , A : str , A : List[Any]=None ):
'''simple docstring'''
if rng is None:
UpperCAmelCase = random.Random()
UpperCAmelCase = 1
for dim in shape:
total_dims *= dim
UpperCAmelCase = []
for _ in range(A ):
values.append(rng.randint(0 , vocab_size - 1 ) )
UpperCAmelCase = np.array(A , dtype=jnp.int32 ).reshape(A )
return output
def lowerCamelCase__ ( A : int , A : Optional[int]=None ):
'''simple docstring'''
UpperCAmelCase = ids_tensor(A , vocab_size=2 , rng=A )
# make sure that at least one token is attended to for each batch
UpperCAmelCase = 1
return attn_mask
@require_flax
class UpperCamelCase__:
__magic_name__ : Optional[int] = None
__magic_name__ : Optional[Any] = ()
def a__( self : str )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 2
UpperCAmelCase = 2
UpperCAmelCase = inputs['''input_ids'''].shape[-1] // 2
UpperCAmelCase = inputs['''input_ids'''][:max_batch_size, :sequence_length]
UpperCAmelCase = jnp.ones_like(lowerCAmelCase )
UpperCAmelCase = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
UpperCAmelCase = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
UpperCAmelCase = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def a__( self : Dict )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = False
UpperCAmelCase = max_length
UpperCAmelCase = 0
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCAmelCase = getattr(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = pt_model_class(lowerCAmelCase ).eval()
UpperCAmelCase = load_flax_weights_in_pytorch_model(lowerCAmelCase , flax_model.params )
UpperCAmelCase = flax_model.generate(lowerCAmelCase ).sequences
UpperCAmelCase = pt_model.generate(torch.tensor(lowerCAmelCase , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
UpperCAmelCase = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def a__( self : Any )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = False
UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
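# Note (added): jax.jit(model.generate) traces and compiles the whole sampling
# loop; the assertion above checks that the compiled and eager paths emit
# identical token sequences.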
def a__( self : Optional[Any] )-> int:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = True
UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : str )-> List[str]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = False
UpperCAmelCase = max_length
UpperCAmelCase = 2
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : List[Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = False
UpperCAmelCase = max_length
UpperCAmelCase = 2
UpperCAmelCase = 2
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def a__( self : Tuple )-> List[str]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = True
UpperCAmelCase = max_length
UpperCAmelCase = 0.8
UpperCAmelCase = 10
UpperCAmelCase = 0.3
UpperCAmelCase = 1
UpperCAmelCase = 8
UpperCAmelCase = 9
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : Optional[Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = max_length
UpperCAmelCase = 1
UpperCAmelCase = 8
UpperCAmelCase = 9
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : Tuple )-> Tuple:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = max_length
UpperCAmelCase = 2
UpperCAmelCase = 1
UpperCAmelCase = 8
UpperCAmelCase = 9
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : Union[str, Any] )-> Any:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCAmelCase = attention_mask.at[(0, 0)].set(0 )
UpperCAmelCase = False
UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : Optional[Any] )-> int:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCAmelCase = attention_mask.at[(0, 0)].set(0 )
UpperCAmelCase = True
UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCAmelCase = attention_mask.at[(0, 0)].set(0 )
UpperCAmelCase = 2
UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class UpperCamelCase__( unittest.TestCase ):
def a__( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''' )
UpperCAmelCase = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
UpperCAmelCase = '''Hello world'''
UpperCAmelCase = tokenizer(lowerCAmelCase , return_tensors='''np''' ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(lowerCAmelCase , '''do_samples''' ):
model.generate(lowerCAmelCase , do_samples=lowerCAmelCase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(lowerCAmelCase , '''foo''' ):
UpperCAmelCase = {'''foo''': '''bar'''}
model.generate(lowerCAmelCase , **lowerCAmelCase )
| 91
| 0
|
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class UpperCAmelCase__ :
'''simple docstring'''
def __init__( self : List[Any] , a_ : int , a_ : Tuple=13 , a_ : int=7 , a_ : Any=True , a_ : List[str]=True , a_ : Union[str, Any]=True , a_ : List[str]=True , a_ : int=99 , a_ : int=[1, 1, 2] , a_ : Optional[int]=1 , a_ : Optional[Any]=32 , a_ : int=4 , a_ : int=8 , a_ : Any=37 , a_ : Tuple="gelu_new" , a_ : Dict=0.1 , a_ : List[Any]=0.1 , a_ : str=0.0 , a_ : Dict=5_12 , a_ : Optional[int]=3 , a_ : Optional[Any]=0.0_2 , a_ : Optional[Any]=3 , a_ : str=4 , a_ : List[str]=None , a_ : Tuple=False , ):
'''simple docstring'''
__UpperCAmelCase : Dict = parent
__UpperCAmelCase : Dict = batch_size
__UpperCAmelCase : Union[str, Any] = seq_length
__UpperCAmelCase : Dict = is_training
__UpperCAmelCase : Dict = use_input_mask
__UpperCAmelCase : List[str] = use_token_type_ids
__UpperCAmelCase : List[Any] = use_labels
__UpperCAmelCase : Any = vocab_size
__UpperCAmelCase : str = block_sizes
__UpperCAmelCase : Optional[Any] = num_decoder_layers
__UpperCAmelCase : List[str] = d_model
__UpperCAmelCase : List[str] = n_head
__UpperCAmelCase : str = d_head
__UpperCAmelCase : str = d_inner
__UpperCAmelCase : Any = hidden_act
__UpperCAmelCase : Dict = hidden_dropout
__UpperCAmelCase : List[str] = attention_dropout
__UpperCAmelCase : Optional[int] = activation_dropout
__UpperCAmelCase : Union[str, Any] = max_position_embeddings
__UpperCAmelCase : Tuple = type_vocab_size
__UpperCAmelCase : List[Any] = 2
__UpperCAmelCase : Dict = num_labels
__UpperCAmelCase : Optional[Any] = num_choices
__UpperCAmelCase : str = scope
__UpperCAmelCase : Dict = initializer_std
# Used in the tests to check the size of the first attention layer
__UpperCAmelCase : Optional[Any] = n_head
# Used in the tests to check the size of the first hidden state
__UpperCAmelCase : Optional[Any] = self.d_model
# Used in the tests to check the number of output hidden states/attentions
__UpperCAmelCase : Optional[Any] = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
__UpperCAmelCase : Tuple = self.num_hidden_layers + 2
def snake_case__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : Union[str, Any] = None
if self.use_input_mask:
__UpperCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : Union[str, Any] = None
if self.use_token_type_ids:
__UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : Optional[Any] = None
__UpperCAmelCase : List[str] = None
if self.use_labels:
__UpperCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase : str = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def snake_case__ ( self : Tuple , a_ : List[str] , a_ : Union[str, Any] , a_ : Tuple , a_ : Dict , a_ : int , a_ : Optional[Any] , a_ : List[Any] , ):
'''simple docstring'''
__UpperCAmelCase : Dict = TFFunnelModel(config=a_ )
__UpperCAmelCase : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__UpperCAmelCase : Tuple = model(a_ )
__UpperCAmelCase : Optional[int] = [input_ids, input_mask]
__UpperCAmelCase : Dict = model(a_ )
__UpperCAmelCase : Dict = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : int = TFFunnelModel(config=a_ )
__UpperCAmelCase : Union[str, Any] = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : str = TFFunnelModel(config=a_ )
__UpperCAmelCase : Optional[int] = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def snake_case__ ( self : Dict , a_ : Optional[int] , a_ : Tuple , a_ : List[str] , a_ : str , a_ : Tuple , a_ : Any , a_ : Tuple , ):
'''simple docstring'''
__UpperCAmelCase : Any = TFFunnelBaseModel(config=a_ )
__UpperCAmelCase : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__UpperCAmelCase : List[str] = model(a_ )
__UpperCAmelCase : Optional[Any] = [input_ids, input_mask]
__UpperCAmelCase : str = model(a_ )
__UpperCAmelCase : Optional[int] = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
__UpperCAmelCase : int = False
__UpperCAmelCase : Optional[Any] = TFFunnelBaseModel(config=a_ )
__UpperCAmelCase : List[Any] = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : str = TFFunnelBaseModel(config=a_ )
__UpperCAmelCase : int = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def snake_case__ ( self : Any , a_ : Optional[int] , a_ : Dict , a_ : List[str] , a_ : Optional[int] , a_ : Any , a_ : str , a_ : List[str] , ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = TFFunnelForPreTraining(config=a_ )
__UpperCAmelCase : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__UpperCAmelCase : List[str] = model(a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def snake_case__ ( self : Tuple , a_ : List[str] , a_ : Optional[int] , a_ : Tuple , a_ : Optional[Any] , a_ : Any , a_ : Any , a_ : Union[str, Any] , ):
'''simple docstring'''
__UpperCAmelCase : int = TFFunnelForMaskedLM(config=a_ )
__UpperCAmelCase : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__UpperCAmelCase : Dict = model(a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self : str , a_ : Tuple , a_ : Union[str, Any] , a_ : List[str] , a_ : int , a_ : List[Any] , a_ : Tuple , a_ : Tuple , ):
'''simple docstring'''
__UpperCAmelCase : Dict = self.num_labels
__UpperCAmelCase : Any = TFFunnelForSequenceClassification(config=a_ )
__UpperCAmelCase : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__UpperCAmelCase : List[Any] = model(a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self : int , a_ : Union[str, Any] , a_ : Any , a_ : Dict , a_ : Any , a_ : Any , a_ : List[str] , a_ : List[Any] , ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = self.num_choices
__UpperCAmelCase : List[str] = TFFunnelForMultipleChoice(config=a_ )
__UpperCAmelCase : Any = tf.tile(tf.expand_dims(a_ , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase : List[Any] = tf.tile(tf.expand_dims(a_ , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase : str = tf.tile(tf.expand_dims(a_ , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase : Optional[int] = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__UpperCAmelCase : Dict = model(a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case__ ( self : str , a_ : Any , a_ : List[Any] , a_ : Any , a_ : Optional[int] , a_ : Optional[int] , a_ : int , a_ : int , ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = self.num_labels
__UpperCAmelCase : int = TFFunnelForTokenClassification(config=a_ )
__UpperCAmelCase : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__UpperCAmelCase : Dict = model(a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self : Optional[Any] , a_ : List[Any] , a_ : Optional[int] , a_ : Dict , a_ : Optional[int] , a_ : Any , a_ : Dict , a_ : List[Any] , ):
'''simple docstring'''
__UpperCAmelCase : int = TFFunnelForQuestionAnswering(config=a_ )
__UpperCAmelCase : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__UpperCAmelCase : List[str] = model(a_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
(
(
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) ,
) : List[str] = config_and_inputs
__UpperCAmelCase : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( __UpperCamelCase ,__UpperCamelCase ,unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCamelCase = (
{
"""feature-extraction""": (TFFunnelBaseModel, TFFunnelModel),
"""fill-mask""": TFFunnelForMaskedLM,
"""question-answering""": TFFunnelForQuestionAnswering,
"""text-classification""": TFFunnelForSequenceClassification,
"""token-classification""": TFFunnelForTokenClassification,
"""zero-shot""": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
def snake_case__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = TFFunnelModelTester(self )
__UpperCAmelCase : Dict = ConfigTester(self , config_class=a_ )
def snake_case__ ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self : List[str] ):
'''simple docstring'''
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def snake_case__ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a_ )
def snake_case__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a_ )
def snake_case__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a_ )
def snake_case__ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a_ )
@require_tf
class UpperCAmelCase__ ( __UpperCamelCase ,unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
UpperCamelCase = False
UpperCamelCase = False
def snake_case__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : Any = TFFunnelModelTester(self , base=a_ )
__UpperCAmelCase : str = ConfigTester(self , config_class=a_ )
def snake_case__ ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*a_ )
def snake_case__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a_ )
def snake_case__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*a_ )
| 226
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
__A =""
__A =""
__A =""
__A =1 # (0 is vertical, 1 is horizontal)
def a ( ):
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = get_dataset(_UpperCAmelCase , _UpperCAmelCase )
print('''Processing...''' )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = update_image_and_anno(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
for index, image in enumerate(_UpperCAmelCase ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__UpperCAmelCase : Any = random_chars(32 )
__UpperCAmelCase : List[str] = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0]
__UpperCAmelCase : Optional[Any] = f'{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'
cva.imwrite(f'{file_root}.jpg' , _UpperCAmelCase , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'Success {index+1}/{len(_UpperCAmelCase )} with {file_name}' )
__UpperCAmelCase : Optional[Any] = []
for anno in new_annos[index]:
__UpperCAmelCase : Union[str, Any] = f'{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'
annos_list.append(_UpperCAmelCase )
with open(f'{file_root}.txt' , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def a ( _UpperCAmelCase : str , _UpperCAmelCase : str ):
'''simple docstring'''
__UpperCAmelCase : Any = []
__UpperCAmelCase : Any = []
for label_file in glob.glob(os.path.join(_UpperCAmelCase , '''*.txt''' ) ):
__UpperCAmelCase : Optional[Any] = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(_UpperCAmelCase ) as in_file:
__UpperCAmelCase : List[str] = in_file.readlines()
__UpperCAmelCase : Optional[Any] = os.path.join(_UpperCAmelCase , f'{label_name}.jpg' )
__UpperCAmelCase : str = []
for obj_list in obj_lists:
__UpperCAmelCase : str = obj_list.rstrip('''\n''' ).split(''' ''' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(_UpperCAmelCase )
labels.append(_UpperCAmelCase )
return img_paths, labels
def a ( _UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : int = 1 ):
'''simple docstring'''
__UpperCAmelCase : Dict = []
__UpperCAmelCase : Optional[Any] = []
__UpperCAmelCase : Any = []
for idx in range(len(_UpperCAmelCase ) ):
__UpperCAmelCase : Tuple = []
__UpperCAmelCase : List[Any] = img_list[idx]
path_list.append(_UpperCAmelCase )
__UpperCAmelCase : str = anno_list[idx]
__UpperCAmelCase : str = cva.imread(_UpperCAmelCase )
if flip_type == 1:
__UpperCAmelCase : Any = cva.flip(_UpperCAmelCase , _UpperCAmelCase )
for bbox in img_annos:
__UpperCAmelCase : List[str] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
__UpperCAmelCase : Any = cva.flip(_UpperCAmelCase , _UpperCAmelCase )
for bbox in img_annos:
__UpperCAmelCase : Union[str, Any] = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(_UpperCAmelCase )
new_imgs_list.append(_UpperCAmelCase )
return new_imgs_list, new_annos_lists, path_list
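# Note (added): YOLO boxes are normalized, so a horizontal flip (type 1) maps
# x_center -> 1 - x_center and a vertical flip (type 0) maps y_center -> 1 - y_center,
# with widths and heights unchanged, e.g.
#   [cls, 0.25, 0.40, 0.10, 0.20] becomes [cls, 0.75, 0.40, 0.10, 0.20] after a horizontal flip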
def a ( _UpperCAmelCase : int = 32 ):
'''simple docstring'''
assert number_char > 1, "The number of characters should be greater than 1"
__UpperCAmelCase : Union[str, Any] = ascii_lowercase + digits
return "".join(random.choice(_UpperCAmelCase ) for _ in range(_UpperCAmelCase ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 226
| 1
|
import sys


def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operations required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
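

# --- Added check (a minimal sketch layered on the functions above): for a
# two-matrix chain with dimensions 10x20 and 20x30, the only parenthesization
# A1 @ A2 costs 10 * 20 * 30 = 6000 scalar multiplications.
def _example_matrix_chain() -> None:
    matrix, _sol = matrix_chain_order([10, 20, 30])
    assert matrix[1][2] == 6000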
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
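# --- Added usage sketch (hedged; requires network access, and the checkpoint
# name is the commonly published one, not pinned by this file): with the lazy
# module in place, the heavy modeling code is only imported on first access:
#
#   from transformers import XCLIPModel, XCLIPProcessor
#
#   model = XCLIPModel.from_pretrained("microsoft/xclip-base-patch32")
#   processor = XCLIPProcessor.from_pretrained("microsoft/xclip-base-patch32")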
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class Image:
    """Image feature to read image data from an image file."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode example into a format for Arrow."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode example image file into image data."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array to the Image arrow storage type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed image files into the Arrow array."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image object to bytes using native compression if possible, otherwise use PNG/TIFF compression."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}


def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}


def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]],
) -> List[dict]:
    """Encode a list of objects into a format suitable for creating an extension array of type `ImageExtensionType`."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
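

# --- Added round-trip sketch (hedged; assumes Pillow is installed): encode a
# NumPy array with the `Image` feature above, then decode it back to PIL.
def _example_image_roundtrip() -> None:
    import PIL.Image

    feature = Image()
    encoded = feature.encode_example(np.zeros((4, 4, 3), dtype=np.uint8))  # -> {"path": None, "bytes": ...}
    decoded = feature.decode_example(encoded)
    assert isinstance(decoded, PIL.Image.Image) and decoded.size == (4, 4)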
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch


TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
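

# --- Added note (hedged): in make_student, `e`/`d` give the number of student
# encoder/decoder layers, and `d=None` copies the whole teacher decoder; that
# is why test_same_decoder_small_encoder compares the student's decoder depth
# to the teacher's (the tiny BART teacher presumably has equal layer counts).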
from __future__ import annotations

import inspect
import unittest

import numpy as np

from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFDeiTForImageClassification,
        TFDeiTForImageClassificationWithTeacher,
        TFDeiTForMaskedImageModeling,
        TFDeiTModel,
    )
    from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import DeiTImageProcessor


class TFDeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # special case for DeiTForImageClassificationWithTeacher model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])

        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
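

# --- Added numeric sketch (hedged; assumes TF is available): the WithTeacher
# head averages the class-token and distillation-token logits, i.e.
# logits = (cls_logits + distillation_logits) / 2.
def _example_teacher_logit_average() -> None:
    cls_logits = tf.constant([[2.0, 0.0]])
    distillation_logits = tf.constant([[0.0, 2.0]])
    logits = (cls_logits + distillation_logits) / 2
    assert np.allclose(logits.numpy(), [[1.0, 1.0]])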
import json
import sys


def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
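

# --- Added usage sketch (hedged; the file names and results dict are
# illustrative): render a one-benchmark results file and check the heading.
def _example_format_json_to_md() -> None:
    import tempfile

    results = {"benchmarks/array_xd": {"read": {"new": 1.0, "old": 2.0, "diff": -1.0}}}
    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False, encoding="utf-8") as src:
        json.dump(results, src)
    dst = src.name.replace(".json", ".md")
    format_json_to_md(src.name, dst)
    with open(dst, encoding="utf-8") as f:
        assert "### Benchmark: array_xd" in f.read()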
import unittest

import numpy as np

from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_LayoutLM_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
lowercase_ :List[Any] = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
lowercase_ :Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , lowercase )
self.assertListEqual(encoding.boxes , lowercase )
# with apply_OCR = False
lowercase_ :str = LayoutLMvaImageProcessor(apply_ocr=lowercase )
lowercase_ :str = image_processing(lowercase , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
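
# --- Added note (hedged sketch): with apply_ocr=True (the default) the
# processor returns `words` plus boxes normalized to a 0-1000 grid alongside
# the pixel values; with apply_ocr=False, as at the end of the test above,
# only pixel values are produced:
#
#   image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)
#   encoding = image_processing(image, return_tensors="pt")  # no .words / .boxes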
import unittest

from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate


def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
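

# --- Added usage sketch (restricted to constructs exercised above): `evaluate`
# interprets a small Python subset against an explicit tool dict and a mutable
# state mapping.
def _example_evaluate() -> None:
    state = {"x": 4}
    result = evaluate("y = add_two(x)", {"add_two": add_two}, state=state)
    assert result == 6
    assert state == {"x": 4, "y": 6}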
"""simple docstring"""
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class _UpperCAmelCase :
def __init__( self : Union[str, Any] ):
snake_case_ : Optional[Any] = ''''''
snake_case_ : Union[str, Any] = ''''''
snake_case_ : str = []
snake_case_ : Union[str, Any] = 0
snake_case_ : Dict = 256
snake_case_ : List[Any] = 0
snake_case_ : str = 0
snake_case_ : Union[str, Any] = 0
snake_case_ : Optional[int] = 0
def _snake_case ( self : Tuple , lowercase_ : Dict ):
snake_case_ : Optional[int] = cva.imread(lowercase_ , 0 )
snake_case_ : Dict = copy.deepcopy(self.img )
snake_case_, snake_case_, snake_case_ : Dict = plt.hist(self.img.ravel() , 256 , [0, 256] , label='''x''' )
snake_case_ : Tuple = np.sum(lowercase_ )
for i in range(len(lowercase_ ) ):
snake_case_ : str = x[i] / self.k
self.sk += prk
snake_case_ : Union[str, Any] = (self.L - 1) * self.sk
if self.rem != 0:
snake_case_ : List[Any] = int(last % last )
snake_case_ : str = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(lowercase_ )
snake_case_ : Union[str, Any] = int(np.ma.count(self.img ) / self.img[1].size )
snake_case_ : str = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
snake_case_ : str = self.img[j][i]
if num != self.last_list[num]:
snake_case_ : Optional[int] = self.last_list[num]
cva.imwrite('''output_data/output.jpg''' , self.img )
def _snake_case ( self : str ):
plt.hist(self.img.ravel() , 256 , [0, 256] )
def _snake_case ( self : Any ):
cva.imshow('''Output-Image''' , self.img )
cva.imshow('''Input-Image''' , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
if __name__ == "__main__":
lowercase__ : Tuple = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''')
lowercase__ : Any = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
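
# --- Added note (hedged): the mapping built in `stretch` is histogram
# equalization; each input gray level r_k is sent (up to rounding) to
#     s_k = (L - 1) * sum_{j <= k} n_j / N,
# the scaled cumulative histogram, which spreads frequent levels apart.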
"""simple docstring"""
def __lowercase ( _a , _a ):
return base * power(_a , (exponent - 1) ) if exponent else 1
if __name__ == "__main__":
print('''Raise base to the power of exponent using recursion...''')
lowercase__ : Optional[Any] = int(input('''Enter the base: ''').strip())
lowercase__ : int = int(input('''Enter the exponent: ''').strip())
lowercase__ : int = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
lowercase__ : Any = 1 / result
print(f'{base} to the power of {exponent} is {result}')
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def _A (__a , __a , __a ) -> Dict:
"""simple docstring"""
if gpta_config_file == "":
SCREAMING_SNAKE_CASE_ : Optional[Any] = GPTaConfig()
else:
SCREAMING_SNAKE_CASE_ : Tuple = GPTaConfig.from_json_file(__a )
SCREAMING_SNAKE_CASE_ : Optional[int] = GPTaModel(__a )
# Load weights from numpy
load_tf_weights_in_gpta(__a , __a , __a )
# Save pytorch-model
SCREAMING_SNAKE_CASE_ : List[str] = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
SCREAMING_SNAKE_CASE_ : List[Any] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(f'Save PyTorch model to {pytorch_weights_dump_path}' )
torch.save(model.state_dict() , __a )
print(f'Save configuration file to {pytorch_config_dump_path}' )
with open(__a , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
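
# --- Added usage sketch (the script name and paths are hypothetical):
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /path/to/gpt2/model.ckpt \
#       --pytorch_dump_folder_path /path/to/output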
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
def capitalize_single_letters(txt: str) -> list:
    """
    Return every variant of `txt` with exactly one alphabetic character uppercased.

    >>> capitalize_single_letters("ab")
    ['Ab', 'aB']
    >>> capitalize_single_letters("a1b")
    ['A1b', 'a1B']
    """
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]


if __name__ == "__main__":
    __import__("doctest").testmod()
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class lowerCAmelCase_ :
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=13, SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=99, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=37, SCREAMING_SNAKE_CASE_="gelu", SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=512, SCREAMING_SNAKE_CASE_=16, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=1000, ) -> List[str]:
UpperCamelCase : Optional[Any] = parent
UpperCamelCase : Tuple = batch_size
UpperCamelCase : Any = seq_length
UpperCamelCase : Dict = is_training
UpperCamelCase : str = use_input_mask
UpperCamelCase : Optional[Any] = use_token_type_ids
UpperCamelCase : Optional[Any] = use_labels
UpperCamelCase : Dict = vocab_size
UpperCamelCase : Optional[Any] = hidden_size
UpperCamelCase : List[Any] = num_hidden_layers
UpperCamelCase : Optional[int] = num_attention_heads
UpperCamelCase : List[Any] = intermediate_size
UpperCamelCase : Any = hidden_act
UpperCamelCase : Tuple = hidden_dropout_prob
UpperCamelCase : List[str] = attention_probs_dropout_prob
UpperCamelCase : Union[str, Any] = max_position_embeddings
UpperCamelCase : Any = type_vocab_size
UpperCamelCase : str = type_sequence_label_size
UpperCamelCase : int = initializer_range
UpperCamelCase : List[Any] = num_labels
UpperCamelCase : Optional[Any] = num_choices
UpperCamelCase : Tuple = scope
UpperCamelCase : List[Any] = range_bbox
def snake_case_ ( self ) -> str:
UpperCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCamelCase : Optional[Any] = bbox[i, j, 3]
UpperCamelCase : Dict = bbox[i, j, 1]
UpperCamelCase : List[str] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCamelCase : List[Any] = bbox[i, j, 2]
UpperCamelCase : Any = bbox[i, j, 0]
UpperCamelCase : List[str] = t
UpperCamelCase : List[str] = tf.convert_to_tensor(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = None
if self.use_input_mask:
UpperCamelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase : Tuple = None
if self.use_token_type_ids:
UpperCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
UpperCamelCase : Optional[int] = None
UpperCamelCase : Optional[int] = None
UpperCamelCase : int = None
if self.use_labels:
UpperCamelCase : Any = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
UpperCamelCase : Any = ids_tensor([self.batch_size], self.num_choices )
UpperCamelCase : Optional[int] = LayoutLMConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> int:
UpperCamelCase : Optional[int] = TFLayoutLMModel(config=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = model(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, token_type_ids=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = model(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, token_type_ids=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = model(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase : Optional[int] = TFLayoutLMForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = model(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, token_type_ids=SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
UpperCamelCase : Optional[int] = self.num_labels
UpperCamelCase : Optional[Any] = TFLayoutLMForSequenceClassification(config=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = model(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, token_type_ids=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
UpperCamelCase : Union[str, Any] = self.num_labels
UpperCamelCase : Dict = TFLayoutLMForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, token_type_ids=SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[str]:
UpperCamelCase : Union[str, Any] = TFLayoutLMForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = model(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, attention_mask=SCREAMING_SNAKE_CASE_, token_type_ids=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def snake_case_ ( self ) -> Dict:
UpperCamelCase : Dict = self.prepare_config_and_inputs()
(
(
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) ,
) : List[str] = config_and_inputs
UpperCamelCase : List[str] = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] )  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] )  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] )  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] )  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] )  # noqa: E231
    # fmt: on
    return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
@slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))
        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )
        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
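# --- Illustrative sketch (not part of the original test file) ---
# The integration tests above feed `bbox` coordinates already normalized to a
# 0-1000 grid. A minimal helper for producing such boxes from pixel
# coordinates could look like this; the helper name and the example values
# are assumptions for illustration only.
def normalize_box(box, page_width, page_height):
    # Scale (x0, y0, x1, y1) pixel coordinates into LayoutLM's 0-1000 range.
    return [
        int(1000 * box[0] / page_width),
        int(1000 * box[1] / page_height),
        int(1000 * box[2] / page_width),
        int(1000 * box[3] / page_height),
    ]

# e.g. normalize_box((48, 84, 73, 128), page_width=762, page_height=1000)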
| 119
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
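# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of exercising the ONNX config above; the tokenizer
# checkpoint is an assumption, and `TensorType` comes from the imports at
# the top of this file.
#
# from transformers import AutoTokenizer
#
# config = CodeGenConfig()
# onnx_config = CodeGenOnnxConfig(config)
# tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
# dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH)
# sorted(dummy.keys())  # ['attention_mask', 'input_ids'] (plus past_key_values when use_past=True)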
| 119
| 1
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    r"""Fast DPR context encoder tokenizer (backed by HuggingFace tokenizers)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    r"""Fast DPR question encoder tokenizer (backed by HuggingFace tokenizers)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])

CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
              batch of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
            is required by one of the truncation/padding parameters. If the model has no specific maximum input
            length (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Return:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        """Get the span predictions for the extractive Q&A model."""
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        """Find the best answer spans for one passage from its start and end logits."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    r"""Fast DPR reader tokenizer (question + passage pairs for the reader model)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
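# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of encoding reader inputs and decoding answer spans;
# the checkpoint name, the passage, and the model call are assumptions.
#
# from transformers import DPRReader
#
# tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
# model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
# encoded = tokenizer(
#     questions="What is love?",
#     titles=["Haddaway"],
#     texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#     return_tensors="pt",
# )
# outputs = model(**encoded)
# best = tokenizer.decode_best_spans(encoded, outputs)  # list of DPRSpanPrediction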
| 355
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}


class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 208
| 0
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas=None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: bool = False,
        clip_sample: bool = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample, timestep):
        step_index = self.index_for_timestep(timestep)
        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None):
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])
        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])
        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)
        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)
        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]
        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1
        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas, num_inference_steps):
        sigma_min = in_sigmas[-1].item()
        sigma_max = in_sigmas[0].item()
        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        return self.dt is None

    def step(self, model_output, timestep, sample, return_dict=True):
        step_index = self.index_for_timestep(timestep)
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2
            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample
            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(self, original_samples, noise, timesteps):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)
        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)
        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
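# --- Illustrative usage (not part of the original module) ---
# The step() method above implements Heun's method: a first-order Euler step
# followed by a corrector that averages the two derivatives, so each timestep
# appears twice in the schedule and the model is called twice per step. A toy
# denoising loop could look like this; `model` is a placeholder for an actual
# noise-prediction network.
#
# scheduler = HeunDiscreteScheduler(num_train_timesteps=1000)
# scheduler.set_timesteps(num_inference_steps=25)
# sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
# for t in scheduler.timesteps:
#     model_input = scheduler.scale_model_input(sample, t)
#     noise_pred = model(model_input, t)  # placeholder denoising model
#     sample = scheduler.step(noise_pred, t, sample).prev_sample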
| 193
|
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 193
| 1
|
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


# check whether the graph has an Euler path or circuit
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    G1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    G2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    G3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    G4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    G5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(G1, max_node)
    check_euler(G2, max_node)
    check_euler(G3, max_node)
    check_euler(G4, max_node)
    check_euler(G5, max_node)


if __name__ == "__main__":
    main()
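# --- Expected behavior (derived from the degree counts above, for reference) ---
# G1: vertices 1 and 5 have odd degree  -> Euler path
# G2: every vertex has even degree      -> Euler cycle
# G3: four vertices have odd degree     -> not Eulerian, no path
# G4: every vertex has even degree      -> Euler cycle
# G5: no edges at all                   -> reported as a (degenerate) Euler cycle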
| 358
|
def solution(pence: int = 200) -> int:
    """Count the ways to make `pence` pence from standard UK coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
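# --- Worked example (illustrative, not part of the original file) ---
# Only the coins 1, 2 and 5 can contribute to amounts up to 5 pence, and there
# are exactly four ways to make 5: 1+1+1+1+1, 1+1+1+2, 1+2+2, and 5 itself.
# The DP above reproduces this because each coin pass adds, for every amount
# i, the ways that use at least one such coin.
# assert solution(5) == 4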
| 71
| 0
|
"""simple docstring"""
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """Read an audio payload through an ffmpeg subprocess and return a float32 waveform."""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def ffmpeg_microphone(sampling_rate: int, chunk_length_s: float, format_for_conversion: str = "f32le"):
    """Stream raw microphone audio through an ffmpeg subprocess, yielding byte chunks."""
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"
    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Stream microphone audio as overlapping numpy chunks suitable for streaming inference."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Cut an iterator of raw bytes into (optionally overlapping) chunks of `chunk_len` bytes."""
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Internal helper: run ffmpeg and yield `buflen`-sized reads from its stdout."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| 155
|
"""simple docstring"""
def lowercase (snake_case__ : list ) -> list:
'''simple docstring'''
if len(snake_case__ ) <= 1:
return [tuple(snake_case__ )]
lowerCAmelCase = []
def generate(snake_case__ : int , snake_case__ : list ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , snake_case__ )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
lowerCAmelCase , lowerCAmelCase = arr[k - 1], arr[i]
else: # k is odd
lowerCAmelCase , lowerCAmelCase = arr[k - 1], arr[0]
generate(k - 1 , snake_case__ )
generate(len(snake_case__ ) , snake_case__ )
return res
if __name__ == "__main__":
a = input('Enter numbers separated by a comma:\n').strip()
a = [int(item) for item in user_input.split(',')]
print(heaps(arr))
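# --- Illustrative check (not part of the original file) ---
# Heap's algorithm enumerates every permutation exactly once, which can be
# cross-checked against itertools for a small input:
#
# from itertools import permutations
# assert sorted(heaps([1, 2, 3])) == sorted(permutations([1, 2, 3]))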
| 155
| 1
|
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extract some layers of the full BertForMaskedLM or RobertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
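# --- Illustrative follow-up (not part of the original script) ---
# The extracted state dict can be loaded into a student model whose layer
# count matches the six teacher layers selected above; the config values
# are assumptions for illustration.
#
# from transformers import DistilBertConfig, DistilBertForMaskedLM
#
# student_config = DistilBertConfig(n_layers=6)
# student = DistilBertForMaskedLM(student_config)
# student.load_state_dict(torch.load(args.dump_checkpoint), strict=False)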
| 194
|
import json
import os
import tempfile

import transformers

import datasets
from utils import generate_example_dataset, get_duration

SPEED_TEST_N_EXAMPLES = 500000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):  # intentionally shadows the builtin, mirroring the calls below
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):  # intentionally shadows the builtin
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        # the result keys below are reconstructed descriptions; the original strings were elided
        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
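# --- Illustrative follow-up (not part of the original script) ---
# The timings are written as JSON next to this file; reading them back is a
# one-liner (the path constant is defined at the top of the script).
#
# with open(RESULTS_FILE_PATH) as f:
#     print(json.load(f))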
| 194
| 1
|
import torch
from torch import nn
class _UpperCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=1 , lowerCAmelCase__=False ) -> List[str]:
'''simple docstring'''
super().__init__()
__lowercase = n_token
__lowercase = d_embed
__lowercase = d_proj
__lowercase = cutoffs + [n_token]
__lowercase = [0] + self.cutoffs
__lowercase = div_val
__lowercase = self.cutoffs[0]
__lowercase = len(self.cutoffs ) - 1
__lowercase = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
__lowercase = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
__lowercase = nn.Parameter(torch.zeros(self.n_clusters ) )
__lowercase = nn.ModuleList()
__lowercase = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowerCAmelCase__ , lowerCAmelCase__ ) ) )
else:
self.out_projs.append(lowerCAmelCase__ )
self.out_layers.append(nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ ) )
else:
for i in range(len(self.cutoffs ) ):
__lowercase , __lowercase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__lowercase = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowerCAmelCase__ , lowerCAmelCase__ ) ) )
self.out_layers.append(nn.Linear(lowerCAmelCase__ , r_idx - l_idx ) )
__lowercase = keep_order
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
'''simple docstring'''
if proj is None:
__lowercase = nn.functional.linear(lowerCAmelCase__ , lowerCAmelCase__ , bias=lowerCAmelCase__ )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
__lowercase = nn.functional.linear(lowerCAmelCase__ , proj.t().contiguous() )
__lowercase = nn.functional.linear(lowerCAmelCase__ , lowerCAmelCase__ , bias=lowerCAmelCase__ )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=False ) -> Optional[int]:
'''simple docstring'''
if labels is not None:
# Shift so that tokens < n predict n
__lowercase = hidden[..., :-1, :].contiguous()
__lowercase = labels[..., 1:].contiguous()
__lowercase = hidden.view(-1 , hidden.size(-1 ) )
__lowercase = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' )
else:
__lowercase = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
__lowercase = self._compute_logit(lowerCAmelCase__ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
__lowercase = labels != -1_00
__lowercase = torch.zeros_like(lowerCAmelCase__ , dtype=hidden.dtype , device=hidden.device )
__lowercase = (
-nn.functional.log_softmax(lowerCAmelCase__ , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
__lowercase = nn.functional.log_softmax(lowerCAmelCase__ , dim=-1 )
else:
# construct weights and biases
__lowercase , __lowercase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
__lowercase , __lowercase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__lowercase = self.out_layers[0].weight[l_idx:r_idx]
__lowercase = self.out_layers[0].bias[l_idx:r_idx]
else:
__lowercase = self.out_layers[i].weight
__lowercase = self.out_layers[i].bias
if i == 0:
__lowercase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
__lowercase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(lowerCAmelCase__ )
biases.append(lowerCAmelCase__ )
__lowercase , __lowercase , __lowercase = weights[0], biases[0], self.out_projs[0]
__lowercase = self._compute_logit(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = nn.functional.log_softmax(lowerCAmelCase__ , dim=1 )
if labels is None:
__lowercase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
__lowercase = torch.zeros_like(lowerCAmelCase__ , dtype=hidden.dtype , device=hidden.device )
__lowercase = 0
__lowercase = [0] + self.cutoffs
for i in range(len(lowerCAmelCase__ ) - 1 ):
__lowercase , __lowercase = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
__lowercase = (labels >= l_idx) & (labels < r_idx)
__lowercase = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
__lowercase = labels.index_select(0 , lowerCAmelCase__ ) - l_idx
__lowercase = head_logprob.index_select(0 , lowerCAmelCase__ )
__lowercase = hidden.index_select(0 , lowerCAmelCase__ )
else:
__lowercase = hidden
if i == 0:
if labels is not None:
__lowercase = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
__lowercase = head_logprob[:, : self.cutoffs[0]]
else:
__lowercase , __lowercase , __lowercase = weights[i], biases[i], self.out_projs[i]
__lowercase = self._compute_logit(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = nn.functional.log_softmax(lowerCAmelCase__ , dim=1 )
__lowercase = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
__lowercase = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
__lowercase = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
__lowercase = logprob_i
if labels is not None:
if (hasattr(self , '''keep_order''' ) and self.keep_order) or keep_order:
out.index_copy_(0 , lowerCAmelCase__ , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
if self.n_clusters == 0:
__lowercase = self._compute_logit(lowerCAmelCase__ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(lowerCAmelCase__ , dim=-1 )
else:
# construct weights and biases
__lowercase , __lowercase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
__lowercase , __lowercase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__lowercase = self.out_layers[0].weight[l_idx:r_idx]
__lowercase = self.out_layers[0].bias[l_idx:r_idx]
else:
__lowercase = self.out_layers[i].weight
__lowercase = self.out_layers[i].bias
if i == 0:
__lowercase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
__lowercase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(lowerCAmelCase__ )
biases.append(lowerCAmelCase__ )
__lowercase , __lowercase , __lowercase = weights[0], biases[0], self.out_projs[0]
__lowercase = self._compute_logit(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
__lowercase = nn.functional.log_softmax(lowerCAmelCase__ , dim=1 )
__lowercase = [0] + self.cutoffs
for i in range(len(lowerCAmelCase__ ) - 1 ):
__lowercase , __lowercase = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
__lowercase = head_logprob[:, : self.cutoffs[0]]
else:
__lowercase , __lowercase , __lowercase = weights[i], biases[i], self.out_projs[i]
__lowercase = self._compute_logit(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = nn.functional.log_softmax(lowerCAmelCase__ , dim=1 )
__lowercase = head_logprob[:, -i] + tail_logprob_i
__lowercase = logprob_i
return out
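# --- Illustration (appended; not part of the original module) ---
# A minimal sketch of the cutoff bookkeeping used in the forward pass above:
# target ids are bucketed into head/tail clusters with
# `(labels >= l_idx) & (labels < r_idx)`, and tail-cluster targets are shifted
# by -l_idx before the gather. The cutoff values below are assumed, not taken
# from any real configuration.
def _demo_cutoff_buckets():
    import torch

    cutoffs = [2_000, 10_000, 30_000]  # assumed vocabulary partition
    labels = torch.tensor([5, 1_999, 2_000, 9_999, 29_999])
    cutoff_values = [0] + cutoffs
    for i in range(len(cutoff_values) - 1):
        l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
        mask_i = (labels >= l_idx) & (labels < r_idx)
        print(f"cluster {i}: {labels[mask_i].tolist()} (target shift -{l_idx})")
# _demo_cutoff_buckets() prints cluster 0: [5, 1999], cluster 1: [2000, 9999],
# cluster 2: [29999], mirroring the per-cluster index_select logic above.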
| 210
|
import requests
def UpperCAmelCase ( lowercase , lowercase ):
"""simple docstring"""
__lowercase = {'''Content-Type''': '''application/json'''}
__lowercase = requests.post(lowercase , json={'''text''': message_body} , headers=lowercase )
if response.status_code != 200:
__lowercase = (
'''Request to slack returned an error '''
F"{response.status_code}, the response is:\n{response.text}"
)
raise ValueError(lowercase )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
| 210
| 1
|
"""simple docstring"""
from math import ceil
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int = 1001 ):
'''simple docstring'''
lowercase_ = 1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
lowercase_ = 2 * i + 1
lowercase_ = 2 * i
lowercase_ = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
SCREAMING_SNAKE_CASE__ = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
| 352
|
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
def A__ ( self , UpperCAmelCase ) -> float:
'''simple docstring'''
return 0.0
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: np.ndarray , __lowerCamelCase: int ):
'''simple docstring'''
lowercase_ = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
lowercase_ = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: FilterType , __lowerCamelCase: int ):
'''simple docstring'''
lowercase_ = 512
lowercase_ = [1] + [0] * (size - 1)
lowercase_ = [filter_type.process(__lowerCamelCase ) for item in inputs]
lowercase_ = [0] * (samplerate - size) # zero-padding
outputs += filler
lowercase_ = np.abs(np.fft.fft(__lowerCamelCase ) )
    lowercase_ = 20 * np.log10(__lowerCamelCase )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("Frequency (Hz)" )
plt.xscale("log" )
# Display within reasonable bounds
lowercase_ = get_bounds(__lowerCamelCase , __lowerCamelCase )
plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
plt.ylabel("Gain (dB)" )
plt.plot(__lowerCamelCase )
plt.show()
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: FilterType , __lowerCamelCase: int ):
'''simple docstring'''
lowercase_ = 512
lowercase_ = [1] + [0] * (size - 1)
lowercase_ = [filter_type.process(__lowerCamelCase ) for item in inputs]
lowercase_ = [0] * (samplerate - size) # zero-padding
outputs += filler
lowercase_ = np.angle(np.fft.fft(__lowerCamelCase ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("Frequency (Hz)" )
plt.xscale("log" )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel("Phase shift (Radians)" )
plt.plot(np.unwrap(__lowerCamelCase , -2 * pi ) )
plt.show()
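# --- Usage sketch (appended) ---
# Any object with a `process(sample) -> float` method satisfies the protocol
# above. The one-pole IIR low-pass below is an illustrative assumption; passing
# an instance of it to the frequency-response plotting helper defined above
# should produce a falling magnitude curve.
class SimpleLowPass:
    def __init__(self, alpha: float = 0.1) -> None:
        self.alpha = alpha
        self.prev = 0.0

    def process(self, sample: float) -> float:
        # y[n] = alpha * x[n] + (1 - alpha) * y[n - 1]
        self.prev = self.alpha * sample + (1 - self.alpha) * self.prev
        return self.prev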
| 297
| 0
|
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class __UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ):
UpperCamelCase = 4_2
class __UpperCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
@register_to_config
def __init__( self : Optional[int], __A : int = 3_2, __A : int = 6_4, __A : int = 2_0, __A : int = 7_6_8, __A : int=7_7, __A : Tuple=4, __A : float = 0.0, __A : str = "silu", __A : Optional[str] = None, __A : Optional[str] = None, __A : Optional[str] = "linear", __A : Optional[str] = "prd", __A : Optional[int] = None, __A : Optional[int] = None, __A : Optional[int] = None, ):
super().__init__()
UpperCAmelCase : Union[str, Any] = num_attention_heads
UpperCAmelCase : Any = attention_head_dim
UpperCAmelCase : List[str] = num_attention_heads * attention_head_dim
UpperCAmelCase : int = additional_embeddings
UpperCAmelCase : Optional[Any] = time_embed_dim or inner_dim
UpperCAmelCase : Dict = embedding_proj_dim or embedding_dim
UpperCAmelCase : Tuple = clip_embed_dim or embedding_dim
UpperCAmelCase : Union[str, Any] = Timesteps(_a, _a, 0 )
UpperCAmelCase : Tuple = TimestepEmbedding(_a, _a, out_dim=_a, act_fn=_a )
UpperCAmelCase : int = nn.Linear(_a, _a )
if embedding_proj_norm_type is None:
UpperCAmelCase : Optional[Any] = None
elif embedding_proj_norm_type == "layer":
UpperCAmelCase : Dict = nn.LayerNorm(_a )
else:
raise ValueError(F'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
UpperCAmelCase : List[str] = nn.Linear(_a, _a )
if encoder_hid_proj_type is None:
UpperCAmelCase : List[Any] = None
elif encoder_hid_proj_type == "linear":
UpperCAmelCase : List[Any] = nn.Linear(_a, _a )
else:
raise ValueError(F'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
UpperCAmelCase : str = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, _a ) )
if added_emb_type == "prd":
UpperCAmelCase : Optional[Any] = nn.Parameter(torch.zeros(1, 1, _a ) )
elif added_emb_type is None:
UpperCAmelCase : List[Any] = None
else:
raise ValueError(
F'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
UpperCAmelCase : int = nn.ModuleList(
[
BasicTransformerBlock(
_a, _a, _a, dropout=_a, activation_fn='''gelu''', attention_bias=_a, )
for d in range(_a )
] )
if norm_in_type == "layer":
UpperCAmelCase : List[Any] = nn.LayerNorm(_a )
elif norm_in_type is None:
UpperCAmelCase : Dict = None
else:
raise ValueError(F'''Unsupported norm_in_type: {norm_in_type}.''' )
UpperCAmelCase : List[Any] = nn.LayerNorm(_a )
UpperCAmelCase : Tuple = nn.Linear(_a, _a )
UpperCAmelCase : int = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -1_0_0_0_0.0 )
causal_attention_mask.triu_(1 )
UpperCAmelCase : List[str] = causal_attention_mask[None, ...]
self.register_buffer('''causal_attention_mask''', _a, persistent=_a )
UpperCAmelCase : Optional[Any] = nn.Parameter(torch.zeros(1, _a ) )
UpperCAmelCase : List[str] = nn.Parameter(torch.zeros(1, _a ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : Union[str, Any] = {}
def fn_recursive_add_processors(__A : str, __A : torch.nn.Module, __A : Dict[str, AttentionProcessor] ):
if hasattr(_a, '''set_processor''' ):
UpperCAmelCase : Union[str, Any] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'''{name}.{sub_name}''', _a, _a )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_a, _a, _a )
return processors
def __magic_name__ ( self : int, __A : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
UpperCAmelCase : List[str] = len(self.attn_processors.keys() )
if isinstance(_a, _a ) and len(_a ) != count:
raise ValueError(
F'''A dict of processors was passed, but the number of processors {len(_a )} does not match the'''
F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(__A : str, __A : torch.nn.Module, __A : int ):
if hasattr(_a, '''set_processor''' ):
if not isinstance(_a, _a ):
module.set_processor(_a )
else:
module.set_processor(processor.pop(F'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'''{name}.{sub_name}''', _a, _a )
for name, module in self.named_children():
fn_recursive_attn_processor(_a, _a, _a )
def __magic_name__ ( self : Tuple ):
self.set_attn_processor(AttnProcessor() )
def __magic_name__ ( self : int, __A : Any, __A : Union[torch.Tensor, float, int], __A : torch.FloatTensor, __A : Optional[torch.FloatTensor] = None, __A : Optional[torch.BoolTensor] = None, __A : bool = True, ):
UpperCAmelCase : List[Any] = hidden_states.shape[0]
UpperCAmelCase : List[Any] = timestep
if not torch.is_tensor(_a ):
UpperCAmelCase : Dict = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device )
elif torch.is_tensor(_a ) and len(timesteps.shape ) == 0:
UpperCAmelCase : Optional[int] = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
UpperCAmelCase : Optional[Any] = timesteps * torch.ones(_a, dtype=timesteps.dtype, device=timesteps.device )
UpperCAmelCase : List[str] = self.time_proj(_a )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
UpperCAmelCase : List[str] = timesteps_projected.to(dtype=self.dtype )
UpperCAmelCase : Dict = self.time_embedding(_a )
if self.embedding_proj_norm is not None:
UpperCAmelCase : List[str] = self.embedding_proj_norm(_a )
UpperCAmelCase : Optional[int] = self.embedding_proj(_a )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
UpperCAmelCase : Any = self.encoder_hidden_states_proj(_a )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('''`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set''' )
UpperCAmelCase : Optional[Any] = self.proj_in(_a )
UpperCAmelCase : Union[str, Any] = self.positional_embedding.to(hidden_states.dtype )
UpperCAmelCase : Any = []
UpperCAmelCase : str = 0
if encoder_hidden_states is not None:
additional_embeds.append(_a )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
UpperCAmelCase : int = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
UpperCAmelCase : Dict = hidden_states[:, None, :]
UpperCAmelCase : Any = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
UpperCAmelCase : Dict = self.prd_embedding.to(hidden_states.dtype ).expand(_a, -1, -1 )
additional_embeds.append(_a )
UpperCAmelCase : Optional[Any] = torch.cat(
_a, dim=1, )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
UpperCAmelCase : Union[str, Any] = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
UpperCAmelCase : Any = F.pad(
_a, (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
), value=0.0, )
UpperCAmelCase : Tuple = hidden_states + positional_embeddings
if attention_mask is not None:
UpperCAmelCase : Any = (1 - attention_mask.to(hidden_states.dtype )) * -1_0_0_0_0.0
UpperCAmelCase : List[Any] = F.pad(_a, (0, self.additional_embeddings), value=0.0 )
UpperCAmelCase : int = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
UpperCAmelCase : List[str] = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0 )
if self.norm_in is not None:
UpperCAmelCase : List[str] = self.norm_in(_a )
for block in self.transformer_blocks:
UpperCAmelCase : Union[str, Any] = block(_a, attention_mask=_a )
UpperCAmelCase : Union[str, Any] = self.norm_out(_a )
if self.prd_embedding is not None:
UpperCAmelCase : Union[str, Any] = hidden_states[:, -1]
else:
UpperCAmelCase : Optional[Any] = hidden_states[:, additional_embeddings_len:]
UpperCAmelCase : int = self.proj_to_clip_embeddings(_a )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_a )
def __magic_name__ ( self : List[Any], __A : str ):
UpperCAmelCase : Optional[int] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
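# --- Illustration (appended; not part of the original model) ---
# A minimal sketch of the causal attention mask built in __init__ above: an
# upper-triangular matrix of large negative values which, once added to
# attention scores, stops each position from attending to later positions.
def _demo_causal_mask(n: int = 4):
    import torch

    mask = torch.full([n, n], -10000.0)
    mask.triu_(1)  # keep -10000.0 strictly above the diagonal, 0.0 elsewhere
    print(mask)
# For n = 4 the diagonal and lower triangle are 0.0 and the rest -10000.0.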
| 336
|
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,) -> Optional[int]:
if config_name_or_path is None:
__lowerCamelCase : List[Any] = 'facebook/rag-token-base' if model_type == 'rag_token' else 'facebook/rag-sequence-base'
if generator_tokenizer_name_or_path is None:
__lowerCamelCase : Optional[int] = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
__lowerCamelCase : Tuple = question_encoder_name_or_path
__lowerCamelCase : Union[str, Any] = RagTokenForGeneration if model_type == 'rag_token' else RagSequenceForGeneration
# Save model.
__lowerCamelCase : Tuple = RagConfig.from_pretrained(_lowerCAmelCase )
__lowerCamelCase : List[Any] = AutoConfig.from_pretrained(_lowerCAmelCase )
__lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(_lowerCAmelCase )
__lowerCamelCase : Tuple = gen_config
__lowerCamelCase : List[Any] = question_encoder_config
__lowerCamelCase : str = model_class.from_pretrained_question_encoder_generator(
_lowerCAmelCase ,_lowerCAmelCase ,config=_lowerCAmelCase )
rag_model.save_pretrained(_lowerCAmelCase )
# Sanity check.
model_class.from_pretrained(_lowerCAmelCase )
# Save tokenizers.
__lowerCamelCase : List[str] = AutoTokenizer.from_pretrained(_lowerCAmelCase )
gen_tokenizer.save_pretrained(dest_dir / 'generator_tokenizer/' )
__lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained(_lowerCAmelCase )
question_encoder_tokenizer.save_pretrained(dest_dir / 'question_encoder_tokenizer/' )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
_UpperCamelCase = parser.parse_args()
_UpperCamelCase = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
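# --- Example invocation (appended; the filename and model identifiers are
# illustrative assumptions). Assuming this script is saved as
# consolidate_rag_checkpoint.py:
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-sequence-consolidated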
| 208
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : Dict = '''roformer'''
def __init__( self, A=50_000, A=None, A=768, A=12, A=12, A=3_072, A="gelu", A=0.1, A=0.1, A=1_536, A=2, A=0.02, A=1E-12, A=0, A=False, A=True, **A, ):
'''simple docstring'''
super().__init__(pad_token_id=A, **A )
SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_size if embedding_size is None else embedding_size
SCREAMING_SNAKE_CASE : Any = hidden_size
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : str = hidden_act
SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE : Union[str, Any] = type_vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE : int = layer_norm_eps
SCREAMING_SNAKE_CASE : Tuple = rotary_value
SCREAMING_SNAKE_CASE : Tuple = use_cache
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
SCREAMING_SNAKE_CASE : List[str] = {0: 'batch', 1: 'sequence'}
SCREAMING_SNAKE_CASE : Any = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
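# --- Usage sketch (appended) ---
# A hedged example of instantiating the configuration above through the public
# transformers API; the defaults mirror the signature in __init__.
# from transformers import RoFormerConfig
# config = RoFormerConfig()
# assert config.embedding_size == config.hidden_size == 768  # None falls back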
| 362
|
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase_ = logging.get_logger(__name__)
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : int = ['''pixel_values''']
def __init__( self, A = True, A = None, A = PILImageResampling.BICUBIC, A = True, A = None, A = True, A = 1 / 255, A = True, A = IMAGENET_DEFAULT_MEAN, A = IMAGENET_DEFAULT_STD, **A, ):
'''simple docstring'''
super().__init__(**A )
SCREAMING_SNAKE_CASE : Any = size if size is not None else {'shortest_edge': 224}
SCREAMING_SNAKE_CASE : List[str] = get_size_dict(A, default_to_square=A )
SCREAMING_SNAKE_CASE : str = crop_size if crop_size is not None else {'height': 224, 'width': 224}
SCREAMING_SNAKE_CASE : int = get_size_dict(A, param_name='crop_size' )
SCREAMING_SNAKE_CASE : Any = do_resize
SCREAMING_SNAKE_CASE : Dict = size
SCREAMING_SNAKE_CASE : int = resample
SCREAMING_SNAKE_CASE : List[Any] = do_center_crop
SCREAMING_SNAKE_CASE : Dict = crop_size
SCREAMING_SNAKE_CASE : Union[str, Any] = do_rescale
SCREAMING_SNAKE_CASE : Any = rescale_factor
SCREAMING_SNAKE_CASE : List[Any] = do_normalize
SCREAMING_SNAKE_CASE : List[str] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
SCREAMING_SNAKE_CASE : Optional[int] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase_ ( self, A, A, A = PILImageResampling.BICUBIC, A = None, **A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = get_size_dict(A, default_to_square=A )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
SCREAMING_SNAKE_CASE : List[str] = int((256 / 224) * size['shortest_edge'] )
SCREAMING_SNAKE_CASE : int = get_resize_output_image_size(A, size=A, default_to_square=A )
SCREAMING_SNAKE_CASE : List[str] = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}" )
return resize(
A, size=(size_dict['height'], size_dict['width']), resample=A, data_format=A, **A )
def UpperCamelCase_ ( self, A, A, A = None, **A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(F"Size dict must have keys 'height' and 'width'. Got {size.keys()}" )
return center_crop(A, size=(size['height'], size['width']), data_format=A, **A )
def UpperCamelCase_ ( self, A, A, A = None, **A, ):
'''simple docstring'''
return rescale(A, scale=A, data_format=A, **A )
def UpperCamelCase_ ( self, A, A, A, A = None, **A, ):
'''simple docstring'''
return normalize(A, mean=A, std=A, data_format=A, **A )
def UpperCamelCase_ ( self, A, A = None, A = None, A = None, A = None, A = None, A = None, A = None, A = None, A = None, A = None, A = None, A = ChannelDimension.FIRST, **A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : Optional[int] = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE : str = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE : Dict = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE : int = size if size is not None else self.size
SCREAMING_SNAKE_CASE : Dict = get_size_dict(A, default_to_square=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE : Optional[Any] = get_size_dict(A, param_name='crop_size' )
SCREAMING_SNAKE_CASE : List[Any] = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : Optional[int] = [to_numpy_array(A ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE : List[str] = [self.resize(A, A, A ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE : List[Any] = [self.center_crop(A, A ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE : List[str] = [self.rescale(A, A ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE : Optional[int] = [self.normalize(A, A, A ) for image in images]
SCREAMING_SNAKE_CASE : List[str] = [to_channel_dimension_format(A, A ) for image in images]
SCREAMING_SNAKE_CASE : Optional[int] = {'pixel_values': images}
return BatchFeature(data=A, tensor_type=A )
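# --- Illustration (appended; not part of the original processor) ---
# A plain-Python sketch of the "shortest_edge" rule in `resize` above: the
# requested edge is first scaled by 256/224, then the image is resized so its
# shorter side matches, preserving the aspect ratio. Rounding details may
# differ from the library helper used above.
def _shortest_edge_output_size(height: int, width: int, shortest_edge: int = 224):
    target = int((256 / 224) * shortest_edge)  # 224 -> 256
    scale = target / min(height, width)
    return round(height * scale), round(width * scale)
# _shortest_edge_output_size(480, 640) == (256, 341): the short side becomes 256.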
| 246
| 0
|
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
UpperCAmelCase_ = True
except ImportError:
UpperCAmelCase_ = False
UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
@staticmethod
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase : str ):
"""simple docstring"""
UpperCAmelCase__ = parser.add_parser("""add-new-model""" )
add_new_model_parser.add_argument("""--testing""" , action="""store_true""" , help="""If in testing mode.""" )
add_new_model_parser.add_argument("""--testing_file""" , type=lowerCamelCase__ , help="""Configuration file on which to run.""" )
add_new_model_parser.add_argument(
"""--path""" , type=lowerCamelCase__ , help="""Path to cookiecutter. Should only be used for testing purposes.""" )
add_new_model_parser.set_defaults(func=lowerCamelCase__ )
def __init__( self : Any , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any]=None , *_UpperCAmelCase : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = testing
UpperCAmelCase__ = testing_file
UpperCAmelCase__ = path
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
warnings.warn(
"""The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. """
"""It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality """
"""checks, you should use `transformers-cli add-new-model-like` instead.""" )
if not _has_cookiecutter:
raise ImportError(
"""Model creation dependencies are required to use the `add_new_model` command. Install them by running """
"""the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n""" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
UpperCAmelCase__ = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
if len(lowerCamelCase__ ) > 0:
raise ValueError(
"""Several directories starting with `cookiecutter-template-` in current working directory. """
"""Please clean your directory by removing all folders starting with `cookiecutter-template-` or """
"""change your working directory.""" )
UpperCAmelCase__ = (
Path(lowerCamelCase__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
UpperCAmelCase__ = path_to_transformer_root / 'templates' / 'adding_a_new_model'
# Execute cookiecutter
if not self._testing:
cookiecutter(str(lowerCamelCase__ ) )
else:
with open(self._testing_file , """r""" ) as configuration_file:
UpperCAmelCase__ = json.load(lowerCamelCase__ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=lowerCamelCase__ , extra_context=lowerCamelCase__ , )
UpperCAmelCase__ = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
# Retrieve configuration
with open(directory + """/configuration.json""" , """r""" ) as configuration_file:
UpperCAmelCase__ = json.load(lowerCamelCase__ )
UpperCAmelCase__ = configuration['lowercase_modelname']
UpperCAmelCase__ = configuration['generate_tensorflow_pytorch_and_flax']
os.remove(f'''{directory}/configuration.json''' )
UpperCAmelCase__ = 'PyTorch' in generate_tensorflow_pytorch_and_flax
UpperCAmelCase__ = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
UpperCAmelCase__ = 'Flax' in generate_tensorflow_pytorch_and_flax
UpperCAmelCase__ = f'''{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'''
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
os.makedirs(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}''' , exist_ok=lowerCamelCase__ )
# Tests require submodules as they have parent imports
with open(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , """w""" ):
pass
shutil.move(
f'''{directory}/__init__.py''' , f'''{model_dir}/__init__.py''' , )
shutil.move(
f'''{directory}/configuration_{lowercase_model_name}.py''' , f'''{model_dir}/configuration_{lowercase_model_name}.py''' , )
def remove_copy_lines(_UpperCAmelCase : Optional[int] ):
with open(lowerCamelCase__ , """r""" ) as f:
UpperCAmelCase__ = f.readlines()
with open(lowerCamelCase__ , """w""" ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(lowerCamelCase__ )
if output_pytorch:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_{lowercase_model_name}.py''' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_tf_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' )
if output_flax:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_flax_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/{lowercase_model_name}.md''' , f'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , )
shutil.move(
f'''{directory}/tokenization_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(_UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int] ):
# Create temp file
UpperCAmelCase__ = mkstemp()
UpperCAmelCase__ = False
with fdopen(lowerCamelCase__ , """w""" ) as new_file:
with open(lowerCamelCase__ ) as old_file:
for line in old_file:
new_file.write(lowerCamelCase__ )
if line_to_copy_below in line:
UpperCAmelCase__ = True
for line_to_copy in lines_to_copy:
new_file.write(lowerCamelCase__ )
if not line_found:
raise ValueError(f'''Line {line_to_copy_below} was not found in file.''' )
# Copy the file permissions from the old file to the new file
copymode(lowerCamelCase__ , lowerCamelCase__ )
# Remove original file
remove(lowerCamelCase__ )
# Move new file
move(lowerCamelCase__ , lowerCamelCase__ )
def skip_units(_UpperCAmelCase : Optional[Any] ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(_UpperCAmelCase : Optional[Any] ):
with open(lowerCamelCase__ ) as datafile:
UpperCAmelCase__ = []
UpperCAmelCase__ = False
UpperCAmelCase__ = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
UpperCAmelCase__ = line.split("""\"""" )[1]
UpperCAmelCase__ = skip_units(lowerCamelCase__ )
elif "# Below: " in line and "##" not in line:
UpperCAmelCase__ = line.split("""\"""" )[1]
UpperCAmelCase__ = skip_units(lowerCamelCase__ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
UpperCAmelCase__ = []
elif "# Replace with" in line and "##" not in line:
UpperCAmelCase__ = []
elif "##" not in line:
lines_to_copy.append(lowerCamelCase__ )
remove(lowerCamelCase__ )
replace_in_files(f'''{directory}/to_replace_{lowercase_model_name}.py''' )
os.rmdir(lowerCamelCase__ )
| 346
|
import os
from datetime import datetime as dt
from github import Github
A_ :str = [
'''good first issue''',
'''feature request''',
'''wip''',
]
def A ( ) -> Any:
__UpperCamelCase : Any =Github(os.environ['GITHUB_TOKEN'] )
__UpperCamelCase : Union[str, Any] =g.get_repo('huggingface/accelerate' )
__UpperCamelCase : Tuple =repo.get_issues(state='open' )
for issue in open_issues:
        __UpperCamelCase : List[Any] =sorted([comment for comment in issue.get_comments()] ,key=lambda comment: comment.created_at ,reverse=True )
        __UpperCamelCase : str =comments[0] if len(comments ) > 0 else None
__UpperCamelCase : Any =dt.utcnow()
__UpperCamelCase : List[str] =(current_time - issue.updated_at).days
__UpperCamelCase : Union[str, Any] =(current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='closed' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
| 71
| 0
|
'''simple docstring'''
import math
import sys
import cv2
import numpy as np
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> np.ndarray:
# For applying gaussian function for each element in matrix.
_snake_case = math.sqrt(__A )
_snake_case = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A , __A ) -> np.ndarray:
_snake_case = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> np.ndarray:
# Creates a gaussian kernel of given dimension.
_snake_case = np.zeros((kernel_size, kernel_size) )
for i in range(0 , __A ):
for j in range(0 , __A ):
_snake_case = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(__A , __A )
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A , __A , ) -> np.ndarray:
_snake_case = np.zeros(img.shape )
_snake_case = get_gauss_kernel(__A , __A )
_snake_case , _snake_case = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
_snake_case = get_slice(__A , __A , __A , __A )
_snake_case = img_s - img_s[kernel_size // 2, kernel_size // 2]
_snake_case = vec_gaussian(__A , __A )
_snake_case = np.multiply(__A , __A )
_snake_case = np.multiply(__A , __A )
_snake_case = np.sum(__A ) / np.sum(__A )
_snake_case = val
return imga
def SCREAMING_SNAKE_CASE__ ( __A ) -> tuple:
_snake_case = args[1] if args[1:] else '../image_data/lena.jpg'
_snake_case = float(args[2] ) if args[2:] else 1.0
_snake_case = float(args[3] ) if args[3:] else 1.0
if args[4:]:
_snake_case = int(args[4] )
_snake_case = kernel_size + abs(kernel_size % 2 - 1 )
else:
_snake_case = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
lowercase , lowercase , lowercase , lowercase : int = parse_args(sys.argv)
    lowercase : Optional[Any] = cv2.imread(filename, 0)
    cv2.imshow("input image", img)
    lowercase : Union[str, Any] = img / 255
    lowercase : Tuple = out.astype("float32")
    lowercase : str = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    lowercase : Any = out * 255
    lowercase : Union[str, Any] = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
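# --- Illustration (appended; patch values are assumed) ---
# A tiny self-contained sketch of the bilateral weighting above: a fixed
# spatial Gaussian multiplied by a per-pixel intensity Gaussian, then used as
# normalised weights over the patch. Uses the module-level math/numpy imports.
def _demo_bilateral_weights():
    def gauss(x, sigma):
        return (1 / (sigma * math.sqrt(2 * math.pi))) * np.exp(-((x / sigma) ** 2) * 0.5)

    patch = np.array([[0.1, 0.2, 0.1], [0.2, 0.9, 0.2], [0.1, 0.2, 0.1]])
    # squared distances from the centre pixel, as in the Gaussian kernel above
    dist = np.sqrt(np.add.outer((np.arange(3) - 1) ** 2, (np.arange(3) - 1) ** 2))
    weights = gauss(dist, 1.0) * gauss(patch - patch[1, 1], 1.0)
    print(float(np.sum(weights * patch) / np.sum(weights)))  # filtered centre value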
| 160
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowercase : Optional[Any] = logging.get_logger(__name__)
lowercase : List[str] = {
"Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
# See all Marian models at https://huggingface.co/models?filter=marian
}
class __UpperCAmelCase ( _lowerCamelCase ):
__lowercase = """marian"""
__lowercase = ["""past_key_values"""]
__lowercase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , lowerCAmelCase_=5_81_01 , lowerCAmelCase_=None , lowerCAmelCase_=10_24 , lowerCAmelCase_=12 , lowerCAmelCase_=40_96 , lowerCAmelCase_=16 , lowerCAmelCase_=12 , lowerCAmelCase_=40_96 , lowerCAmelCase_=16 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_="gelu" , lowerCAmelCase_=10_24 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.02 , lowerCAmelCase_=5_81_00 , lowerCAmelCase_=False , lowerCAmelCase_=5_81_00 , lowerCAmelCase_=0 , lowerCAmelCase_=0 , lowerCAmelCase_=True , **lowerCAmelCase_ , ):
"""simple docstring"""
_snake_case = vocab_size
_snake_case = decoder_vocab_size or vocab_size
_snake_case = max_position_embeddings
_snake_case = d_model
_snake_case = encoder_ffn_dim
_snake_case = encoder_layers
_snake_case = encoder_attention_heads
_snake_case = decoder_ffn_dim
_snake_case = decoder_layers
_snake_case = decoder_attention_heads
_snake_case = dropout
_snake_case = attention_dropout
_snake_case = activation_dropout
_snake_case = activation_function
_snake_case = init_std
_snake_case = encoder_layerdrop
_snake_case = decoder_layerdrop
_snake_case = use_cache
_snake_case = encoder_layers
_snake_case = scale_embedding # scale factor will be sqrt(d_model) if True
_snake_case = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , decoder_start_token_id=lowerCAmelCase_ , forced_eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ , )
class __UpperCAmelCase ( _lowerCamelCase ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def lowerCamelCase ( self ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_snake_case = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_snake_case = {0: 'batch'}
_snake_case = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
_snake_case = {0: 'batch', 1: 'decoder_sequence'}
_snake_case = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase_ , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_snake_case = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_snake_case , _snake_case = self.num_layers
for i in range(lowerCAmelCase_ ):
_snake_case = {0: 'batch', 2: 'past_sequence + sequence'}
_snake_case = {0: 'batch', 2: 'past_sequence + sequence'}
else:
_snake_case = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def lowerCamelCase ( self ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_snake_case = super().outputs
else:
_snake_case = super(lowerCAmelCase_ , self ).outputs
if self.use_past:
_snake_case , _snake_case = self.num_layers
for i in range(lowerCAmelCase_ ):
_snake_case = {0: 'batch', 2: 'past_sequence + sequence'}
_snake_case = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = -1 , lowerCAmelCase_ = -1 , lowerCAmelCase_ = False , lowerCAmelCase_ = None , ):
"""simple docstring"""
_snake_case = self._generate_dummy_inputs_for_encoder_and_decoder(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Generate decoder inputs
_snake_case = seq_length if not self.use_past else 1
_snake_case = self._generate_dummy_inputs_for_encoder_and_decoder(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
_snake_case = dict(**lowerCAmelCase_ , **lowerCAmelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_snake_case , _snake_case = common_inputs['input_ids'].shape
_snake_case = common_inputs['decoder_input_ids'].shape[1]
_snake_case , _snake_case = self.num_attention_heads
_snake_case = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_snake_case = decoder_seq_length + 3
_snake_case = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_snake_case = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(lowerCAmelCase_ , lowerCAmelCase_ )] , dim=1 )
_snake_case = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_snake_case , _snake_case = self.num_layers
_snake_case = min(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = max(lowerCAmelCase_ , lowerCAmelCase_ ) - min_num_layers
_snake_case = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(lowerCAmelCase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCAmelCase_ ),
torch.zeros(lowerCAmelCase_ ),
torch.zeros(lowerCAmelCase_ ),
torch.zeros(lowerCAmelCase_ ),
) )
# TODO: test this.
_snake_case = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(lowerCAmelCase_ , lowerCAmelCase_ ):
common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase_ ), torch.zeros(lowerCAmelCase_ )) )
return common_inputs
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = -1 , lowerCAmelCase_ = -1 , lowerCAmelCase_ = False , lowerCAmelCase_ = None , ):
"""simple docstring"""
_snake_case = self._generate_dummy_inputs_for_encoder_and_decoder(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_snake_case , _snake_case = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_snake_case = seqlen + 2
_snake_case , _snake_case = self.num_layers
_snake_case , _snake_case = self.num_attention_heads
_snake_case = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_snake_case = common_inputs['attention_mask'].dtype
_snake_case = torch.cat(
[common_inputs['attention_mask'], torch.ones(lowerCAmelCase_ , lowerCAmelCase_ , dtype=lowerCAmelCase_ )] , dim=1 )
_snake_case = [
(torch.zeros(lowerCAmelCase_ ), torch.zeros(lowerCAmelCase_ )) for _ in range(lowerCAmelCase_ )
]
return common_inputs
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = -1 , lowerCAmelCase_ = -1 , lowerCAmelCase_ = False , lowerCAmelCase_ = None , ):
"""simple docstring"""
_snake_case = compute_effective_axis_dimension(
lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_snake_case = tokenizer.num_special_tokens_to_add(lowerCAmelCase_ )
_snake_case = compute_effective_axis_dimension(
lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase_ )
# Generate dummy inputs according to compute batch and sequence
_snake_case = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
_snake_case = dict(tokenizer(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ ) )
return common_inputs
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = -1 , lowerCAmelCase_ = -1 , lowerCAmelCase_ = False , lowerCAmelCase_ = None , ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_snake_case = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_ )
else:
_snake_case = self._generate_dummy_inputs_for_causal_lm(
lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_ )
return common_inputs
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_snake_case = super()._flatten_past_key_values_(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
_snake_case = super(lowerCAmelCase_ , self )._flatten_past_key_values_(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
@property
def lowerCamelCase ( self ):
"""simple docstring"""
return 1E-4
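# --- Usage sketch (appended) ---
# A hedged example against the public transformers API for the configuration
# above; the keyword values are illustrative.
# from transformers import MarianConfig
# config = MarianConfig(d_model=512, encoder_layers=6, decoder_layers=6)
# assert config.hidden_size == config.d_model  # via the attribute map above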
| 160
| 1
|
"""simple docstring"""
from collections import Counter
from timeit import timeit
def lowerCamelCase__ ( __snake_case = "", ) -> bool:
"""simple docstring"""
return sum(c % 2 for c in Counter(input_str.replace(''' ''', '''''' ).lower() ).values() ) < 2
def lowerCamelCase__ ( __snake_case = "" ) -> bool:
"""simple docstring"""
if len(__snake_case ) == 0:
return True
_UpperCamelCase = input_str.replace(''' ''', '''''' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
_UpperCamelCase = {}
for character in lower_case_input_str:
_UpperCamelCase = character_freq_dict.get(__snake_case, 0 ) + 1
_UpperCamelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def lowerCamelCase__ ( __snake_case = "" ) -> None:
"""simple docstring"""
print('''\nFor string = ''', __snake_case, ''':''' )
print(
'''> can_string_be_rearranged_as_palindrome_counter()''', '''\tans =''', can_string_be_rearranged_as_palindrome_counter(__snake_case ), '''\ttime =''', timeit(
'''z.can_string_be_rearranged_as_palindrome_counter(z.check_str)''', setup='''import __main__ as z''', ), '''seconds''', )
print(
'''> can_string_be_rearranged_as_palindrome()''', '''\tans =''', can_string_be_rearranged_as_palindrome(__snake_case ), '''\ttime =''', timeit(
'''z.can_string_be_rearranged_as_palindrome(z.check_str)''', setup='''import __main__ as z''', ), '''seconds''', )
if __name__ == "__main__":
_a = input(
"""Enter string to determine if it can be rearranged as a palindrome or not: """
).strip()
benchmark(check_str)
_a = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F"""{check_str} can {"" if status else "not "}be rearranged as a palindrome""")
| 194
|
"""simple docstring"""
import datasets
_a = """\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
"""
_a = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
_a = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def lowerCamelCase__ ( __snake_case, __snake_case ) -> str:
"""simple docstring"""
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase( datasets.Metric ):
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32'''),
'''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32'''),
}) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
def UpperCAmelCase ( self , __a , __a) -> Dict:
'''simple docstring'''
return {"accuracy": simple_accuracy(__a , __a)}
| 194
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCAmelCase ={
"configuration_chinese_clip": [
"CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ChineseCLIPConfig",
"ChineseCLIPOnnxConfig",
"ChineseCLIPTextConfig",
"ChineseCLIPVisionConfig",
],
"processing_chinese_clip": ["ChineseCLIPProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =["ChineseCLIPFeatureExtractor"]
__UpperCAmelCase =["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =[
"CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ChineseCLIPModel",
"ChineseCLIPPreTrainedModel",
"ChineseCLIPTextModel",
"ChineseCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
__UpperCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
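# --- Usage sketch (appended) ---
# A hedged example of importing the classes exported above; the checkpoint id
# is believed to be the reference Chinese-CLIP release and requires a download.
# from transformers import ChineseCLIPModel, ChineseCLIPProcessor
# model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
# processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")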
| 237
|
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase =get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class a__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCamelCase : Optional[Any] =DebertaVaTokenizer
lowerCamelCase : Optional[int] =DebertaVaTokenizerFast
lowerCamelCase : Optional[Any] =True
lowerCamelCase : Tuple =True
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCamelCase = DebertaVaTokenizer(a , unk_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , a : Dict ):
"""simple docstring"""
__lowerCamelCase = '''this is a test'''
__lowerCamelCase = '''this is a test'''
return input_text, output_text
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass
    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
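
    # Editorial aside (not part of the original tests): the "▁" glyph in the
    # expected token lists above is SentencePiece's word-boundary metasymbol
    # (U+2581, LOWER ONE EIGHTH BLOCK), not an underscore; it marks pieces that
    # begin a new whitespace-separated word.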
    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
    def test_sequence_builders(self):
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )
@slow
    def test_tokenizer_integration(self):
        # fmt: off
__lowerCamelCase = {'''input_ids''': [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        expected_encoding = __lowerCamelCase  # the fixture dict assigned above
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/deberta-v2-xlarge",
            revision="ad6e42c1532ddf3a15c39246b63f5559d558b670",
        )
| 237
| 1
|
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '\\n\n'
_DESCRIPTION = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
_KWARGS_DESCRIPTION = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # perplexity = exp( masked sum of per-token NLL / number of real tokens )
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 83
|
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()
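

def _equalization_demo() -> None:
    """Hedged note (not in the original file): the mapping built in ``stretch`` is
    the classic histogram-equalization transform s_k = (L - 1) * sum_{j<=k} p(r_j),
    i.e. the scaled cumulative distribution of grey levels. Quick NumPy check on a
    toy 4-level histogram:"""
    hist = np.array([4.0, 2.0, 2.0, 0.0])
    cdf = np.cumsum(hist / hist.sum())
    mapping = np.round((4 - 1) * cdf).astype(int)
    print(mapping)  # [2 2 3 3] -> dark levels are spread toward the top of the range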
if __name__ == "__main__":
lowerCAmelCase: Optional[Any] = os.path.join(os.path.basename(__file__), 'image_data/input.jpg')
lowerCAmelCase: Tuple = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 297
| 0
|
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Use depth-first search to build a finish-time ordering of the vertices."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Use depth-first search on the reversed graph to collect one component."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Return the strongly connected components of ``graph`` (Kosaraju's algorithm)."""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
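

if __name__ == "__main__":
    # Hedged demo (not in the original file): component membership below is what
    # Kosaraju's algorithm should find; ordering inside each component may differ.
    print(strongly_connected_components(test_graph_1))  # expect {0, 1, 2}, {3}, {4}
    print(strongly_connected_components(test_graph_2))  # expect {0, 1, 2}, {3, 4, 5}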
| 358
|
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}

COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str

    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")

    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
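

def _xor_roundtrip_demo() -> None:
    """Hedged illustration (not part of the original solution): XOR is its own
    inverse, so encrypting a plaintext with a key and feeding the result back
    through ``try_key`` with the same key recovers the plaintext."""
    key = (ord("a"), ord("b"), ord("c"))  # hypothetical three-letter key
    plaintext = "the quick brown fox"
    ciphertext = [ord(char) ^ keychar for keychar, char in zip(cycle(key), plaintext)]
    assert try_key(ciphertext, key) == plaintext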
if __name__ == "__main__":
print(f"{solution() = }")
| 344
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_nllb_moe''': [
'''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''NllbMoeConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
'''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NllbMoeForConditionalGeneration''',
'''NllbMoeModel''',
'''NllbMoePreTrainedModel''',
'''NllbMoeTop2Router''',
'''NllbMoeSparseMLP''',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 41
|
"""simple docstring"""
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
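

# Hedged sanity check (not part of the original metric file): for binary labels,
# a perfect prediction yields MCC = +1.0 and an exactly inverted one yields -1.0.
if __name__ == "__main__":
    assert matthews_corrcoef([0, 1, 1, 0], [0, 1, 1, 0]) == 1.0
    assert matthews_corrcoef([0, 1, 1, 0], [1, 0, 0, 1]) == -1.0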
| 246
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}


class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
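

# Hedged usage sketch (relies only on the config class defined above): derive
# the stage names for a hypothetical two-stage backbone.
if __name__ == "__main__":
    config = BitConfig(hidden_sizes=[64, 128], depths=[2, 2], layer_type="bottleneck")
    print(config.stage_names)  # ['stem', 'stage1', 'stage2']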
| 351
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""的""",
"""价""",
"""格""",
"""是""",
"""15""",
"""便""",
"""alex""",
"""##andra""",
""",""",
"""。""",
"""-""",
"""t""",
"""shirt""",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        image_processor_map = {
"""do_resize""": True,
"""size""": {"""height""": 224, """width""": 224},
"""do_center_crop""": True,
"""crop_size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"""image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
"""do_convert_rgb""": True,
}
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images to feed the processor."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 285
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 160
|
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, act_dim=6, state_dim=17, hidden_size=23, max_length=11, is_training=True
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
    def get_config(self):
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
    def create_and_check_model(
        self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask
    ):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        """An integration test that performs autoregressive prediction of state, action and
        return from a sequence of states, actions and returns. Run over two timesteps."""
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))

            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            action = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
| 1
|
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
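
# Hedged usage sketch (editorial): any checkpoint that bundles a VQModel, a
# UNet2DModel and a DDIMScheduler can drive the pipeline above, e.g. the
# unconditional LDM checkpoint "CompVis/ldm-celebahq-256":
#
#     from diffusers import LDMPipeline
#     pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#     image = pipe(batch_size=1, num_inference_steps=50).images[0]
#     image.save("ldm_sample.png")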
| 7
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 7
| 1
|
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
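
# Editorial note (not in the original script): `accelerator.no_sync(model)` mirrors
# torch DDP's `no_sync` context manager -- inside it, cross-rank gradient all-reduce
# is skipped so gradients accumulate locally. On a single process it is a no-op,
# which is exactly what `test_noop_sync` below asserts.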
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 237
|
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available() -> bool:
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available() -> bool:
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available() -> bool:
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available() -> bool:
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available() -> bool:
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
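
# Usage sketch (hedged: `trainer` is an assumed `transformers.Trainer` instance
# and the trial count is illustrative). When no backend is passed,
# `Trainer.hyperparameter_search` resolves one via `default_hp_search_backend()`:
#
#     best_run = trainer.hyperparameter_search(backend="optuna", n_trials=10)
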
| 237
| 1
|
'''simple docstring'''
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()
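
# Example (live API call; the result reflects current Open Library data and
# may change over time):
# >>> get_openlibrary_data("isbn/0140328726")["title"]
# 'Matilda'
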
def summarize_book(ol_book_data: dict) -> dict:
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
__UpperCAmelCase =input("\nEnter the ISBN code to search (or \'quit\' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (1_0, 1_3) or not isbn.isdigit():
print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
__UpperCAmelCase =summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
print("\n".join(f'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'''Sorry, there are no results for ISBN: {isbn}.''')
| 362
|
'''simple docstring'''
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 1 or dst_height < 1:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)
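
# Worked example (numbers assumed): scaling a 512x512 source to 800x600 gives
# ratio_x = 512 / 800 = 0.64 and ratio_y = 512 / 600 ≈ 0.853, so destination
# pixel (x=100, y=60) samples source pixel (get_x(100), get_y(60)) = (64, 51).
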
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
| 237
| 0
|
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
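
    # How the toy BPE above tokenizes "lower" (trace for reference): characters
    # "l o w e r</w>" -> merge "l o" -> "lo" -> merge "lo w" -> "low" -> merge
    # "e r</w>" -> "er</w>", giving ["low", "er</w>"] as asserted below.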
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
| 195
|
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
"""simple docstring"""
if inductance <= 0:
raise ValueError("""Inductance cannot be 0 or negative""" )
elif capacitance <= 0:
raise ValueError("""Capacitance cannot be 0 or negative""" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
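
# Worked example (component values assumed): a 10 mH inductor with a 100 nF
# capacitor resonates at 1 / (2 * pi * sqrt(L * C)):
# >>> resonant_frequency(inductance=10e-3, capacitance=100e-9)
# ('Resonant frequency', 5032.9212...)
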
if __name__ == "__main__":
import doctest
doctest.testmod()
| 344
| 0
|
import mpmath # for roots of unity
import numpy as np
class FFT:
    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)
        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()
    # Discrete fourier transform of A and B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]
    # Multiply the two polynomials via the convolution theorem
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove trailing 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c
    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product)
        )

        return f"{a}\n{b}\n{c}"
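
# Example (inputs assumed): FFT([1, 2], [3, 4]).product multiplies
# A(x) = 1 + 2x by B(x) = 3 + 4x and yields the coefficients of
# 3 + 10x + 8x^2, since (1 + 2x)(3 + 4x) = 3 + 4x + 6x + 8x^2.
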
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 364
|
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCAmelCase ( lowerCamelCase__ ):
def __init__( self : Dict, __A : Any, __A : Optional[int]=1_3, __A : Any=7, __A : Tuple=True, __A : int=True, __A : Dict=True, __A : Union[str, Any]=True, __A : Optional[int]=9_9, __A : Optional[int]=3_2, __A : Union[str, Any]=5, __A : Optional[int]=4, __A : str=3_7, __A : Union[str, Any]="gelu", __A : Optional[int]=0.1, __A : Optional[Any]=0.1, __A : Any=5_1_2, __A : List[str]=1_6, __A : Optional[int]=2, __A : Union[str, Any]=0.0_2, __A : Optional[int]=False, __A : List[str]=True, __A : int="None", __A : List[str]=3, __A : Any=4, __A : Dict=None, ):
UpperCAmelCase : str = parent
UpperCAmelCase : int = batch_size
UpperCAmelCase : Tuple = seq_length
UpperCAmelCase : Union[str, Any] = is_training
UpperCAmelCase : Dict = use_input_mask
UpperCAmelCase : Optional[Any] = use_token_type_ids
UpperCAmelCase : str = use_labels
UpperCAmelCase : List[Any] = vocab_size
UpperCAmelCase : Union[str, Any] = hidden_size
UpperCAmelCase : Tuple = num_hidden_layers
UpperCAmelCase : str = num_attention_heads
UpperCAmelCase : Tuple = intermediate_size
UpperCAmelCase : Optional[Any] = hidden_act
UpperCAmelCase : int = hidden_dropout_prob
UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase : Union[str, Any] = max_position_embeddings
UpperCAmelCase : int = type_vocab_size
UpperCAmelCase : str = type_sequence_label_size
UpperCAmelCase : Union[str, Any] = initializer_range
UpperCAmelCase : Dict = num_labels
UpperCAmelCase : Optional[Any] = num_choices
UpperCAmelCase : str = relative_attention
UpperCAmelCase : Any = position_biased_input
UpperCAmelCase : str = pos_att_type
UpperCAmelCase : Union[str, Any] = scope
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
UpperCAmelCase : int = None
if self.use_input_mask:
UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
UpperCAmelCase : Dict = None
if self.use_token_type_ids:
UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
UpperCAmelCase : List[str] = None
UpperCAmelCase : str = None
UpperCAmelCase : Any = None
if self.use_labels:
UpperCAmelCase : str = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size], self.num_choices )
UpperCAmelCase : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__ ( self : Any ):
return DebertaVaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type, )
def __magic_name__ ( self : Dict, __A : str ):
self.parent.assertListEqual(list(result.loss.size() ), [] )
def __magic_name__ ( self : List[str], __A : Dict, __A : int, __A : str, __A : List[str], __A : Dict, __A : str, __A : int ):
UpperCAmelCase : Optional[int] = DebertaVaModel(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Optional[int] = model(__A, attention_mask=__A, token_type_ids=__A )[0]
UpperCAmelCase : Optional[int] = model(__A, token_type_ids=__A )[0]
UpperCAmelCase : int = model(__A )[0]
self.parent.assertListEqual(list(sequence_output.size() ), [self.batch_size, self.seq_length, self.hidden_size] )
def __magic_name__ ( self : Dict, __A : Union[str, Any], __A : Optional[Any], __A : Tuple, __A : Optional[int], __A : List[Any], __A : List[Any], __A : Optional[int] ):
UpperCAmelCase : int = DebertaVaForMaskedLM(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : int = model(__A, attention_mask=__A, token_type_ids=__A, labels=__A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : List[str], __A : str, __A : Optional[Any], __A : List[str], __A : Optional[int], __A : List[Any], __A : int, __A : Optional[int] ):
UpperCAmelCase : int = self.num_labels
UpperCAmelCase : Union[str, Any] = DebertaVaForSequenceClassification(__A )
model.to(__A )
model.eval()
UpperCAmelCase : int = model(__A, attention_mask=__A, token_type_ids=__A, labels=__A )
self.parent.assertListEqual(list(result.logits.size() ), [self.batch_size, self.num_labels] )
self.check_loss_output(__A )
def __magic_name__ ( self : Any, __A : Tuple, __A : Any, __A : str, __A : List[Any], __A : Dict, __A : Optional[Any], __A : List[str] ):
UpperCAmelCase : Dict = self.num_labels
UpperCAmelCase : int = DebertaVaForTokenClassification(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Tuple = model(__A, attention_mask=__A, token_type_ids=__A, labels=__A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self : Tuple, __A : List[str], __A : Tuple, __A : Tuple, __A : int, __A : Optional[Any], __A : Tuple, __A : Any ):
UpperCAmelCase : Union[str, Any] = DebertaVaForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Any = model(
__A, attention_mask=__A, token_type_ids=__A, start_positions=__A, end_positions=__A, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def __magic_name__ ( self : Dict, __A : Optional[int], __A : str, __A : List[str], __A : Dict, __A : Optional[Any], __A : Union[str, Any], __A : int ):
UpperCAmelCase : Union[str, Any] = DebertaVaForMultipleChoice(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
UpperCAmelCase : int = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
UpperCAmelCase : Tuple = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
UpperCAmelCase : int = model(
__A, attention_mask=__A, token_type_ids=__A, labels=__A, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def __magic_name__ ( self : List[str] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
"""feature-extraction""": DebertaVaModel,
"""fill-mask""": DebertaVaForMaskedLM,
"""question-answering""": DebertaVaForQuestionAnswering,
"""text-classification""": DebertaVaForSequenceClassification,
"""token-classification""": DebertaVaForTokenClassification,
"""zero-shot""": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : str = DebertaVaModelTester(self )
UpperCAmelCase : Dict = ConfigTester(self, config_class=__A, hidden_size=3_7 )
def __magic_name__ ( self : Any ):
self.config_tester.run_common_tests()
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__A )
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__A )
def __magic_name__ ( self : Tuple ):
UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__A )
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__A )
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__A )
def __magic_name__ ( self : Any ):
UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*__A )
@slow
def __magic_name__ ( self : Dict ):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : str = DebertaVaModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase ( unittest.TestCase ):
@unittest.skip(reason='''Model not available yet''' )
def __magic_name__ ( self : str ):
pass
@slow
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : str = DebertaVaModel.from_pretrained('''microsoft/deberta-v2-xlarge''' )
UpperCAmelCase : Union[str, Any] = torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
UpperCAmelCase : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCAmelCase : List[str] = model(__A, attention_mask=__A )[0]
# compare the actual values for a slice.
UpperCAmelCase : List[str] = torch.tensor(
[[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4], __A, atol=1E-4 ), F'''{output[:, 1:4, 1:4]}''' )
| 99
| 0
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    # Ensure that table is a square array
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
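
# Worked example (matrix values assumed): for table = [[2, -2], [-4, 3]] the
# Doolittle factorisation gives lower = [[1, 0], [-2, 1]] and
# upper = [[2, -2], [0, -1]], since -4 / 2 = -2 and 3 - (-2) * (-2) = -1.
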
if __name__ == "__main__":
import doctest
doctest.testmod()
| 265
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
_UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
# General docstring
_UpperCAmelCase : Dict = """ResNetConfig"""
# Base docstring
_UpperCAmelCase : Optional[int] = """microsoft/resnet-50"""
_UpperCAmelCase : Optional[Any] = [1, 2048, 7, 7]
# Image classification docstring
_UpperCAmelCase : Tuple = """microsoft/resnet-50"""
_UpperCAmelCase : int = """tiger cat"""
_UpperCAmelCase : Optional[Any] = [
"""microsoft/resnet-50""",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class lowercase ( nn.Module ):
def __init__( self , snake_case , snake_case , snake_case = 3 , snake_case = 1 , snake_case = "relu" ):
super().__init__()
snake_case_ = nn.Convad(
snake_case , snake_case , kernel_size=snake_case , stride=snake_case , padding=kernel_size // 2 , bias=snake_case )
snake_case_ = nn.BatchNormad(snake_case )
snake_case_ = ACTaFN[activation] if activation is not None else nn.Identity()
def a ( self , snake_case ):
snake_case_ = self.convolution(snake_case )
snake_case_ = self.normalization(snake_case )
snake_case_ = self.activation(snake_case )
return hidden_state
class lowercase ( nn.Module ):
def __init__( self , snake_case ):
super().__init__()
snake_case_ = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
snake_case_ = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
snake_case_ = config.num_channels
def a ( self , snake_case ):
snake_case_ = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
snake_case_ = self.embedder(snake_case )
snake_case_ = self.pooler(snake_case )
return embedding
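
    # Shape sketch (standard ResNet-50 defaults assumed, e.g. embedding_size=64):
    # a 3x224x224 input passes the stride-2 7x7 conv -> 64x112x112, then the
    # stride-2 max pool -> 64x56x56 before entering the encoder stages.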
class lowercase ( nn.Module ):
def __init__( self , snake_case , snake_case , snake_case = 2 ):
super().__init__()
snake_case_ = nn.Convad(snake_case , snake_case , kernel_size=1 , stride=snake_case , bias=snake_case )
snake_case_ = nn.BatchNormad(snake_case )
def a ( self , snake_case ):
snake_case_ = self.convolution(snake_case )
snake_case_ = self.normalization(snake_case )
return hidden_state
class lowercase ( nn.Module ):
def __init__( self , snake_case , snake_case , snake_case = 1 , snake_case = "relu" ):
super().__init__()
snake_case_ = in_channels != out_channels or stride != 1
snake_case_ = (
ResNetShortCut(snake_case , snake_case , stride=snake_case ) if should_apply_shortcut else nn.Identity()
)
snake_case_ = nn.Sequential(
ResNetConvLayer(snake_case , snake_case , stride=snake_case ) , ResNetConvLayer(snake_case , snake_case , activation=snake_case ) , )
snake_case_ = ACTaFN[activation]
def a ( self , snake_case ):
snake_case_ = hidden_state
snake_case_ = self.layer(snake_case )
snake_case_ = self.shortcut(snake_case )
hidden_state += residual
snake_case_ = self.activation(snake_case )
return hidden_state
class lowercase ( nn.Module ):
def __init__( self , snake_case , snake_case , snake_case = 1 , snake_case = "relu" , snake_case = 4 ):
super().__init__()
snake_case_ = in_channels != out_channels or stride != 1
snake_case_ = out_channels // reduction
snake_case_ = (
ResNetShortCut(snake_case , snake_case , stride=snake_case ) if should_apply_shortcut else nn.Identity()
)
snake_case_ = nn.Sequential(
ResNetConvLayer(snake_case , snake_case , kernel_size=1 ) , ResNetConvLayer(snake_case , snake_case , stride=snake_case ) , ResNetConvLayer(snake_case , snake_case , kernel_size=1 , activation=snake_case ) , )
snake_case_ = ACTaFN[activation]
def a ( self , snake_case ):
snake_case_ = hidden_state
snake_case_ = self.layer(snake_case )
snake_case_ = self.shortcut(snake_case )
hidden_state += residual
snake_case_ = self.activation(snake_case )
return hidden_state
class lowercase ( nn.Module ):
def __init__( self , snake_case , snake_case , snake_case , snake_case = 2 , snake_case = 2 , ):
super().__init__()
snake_case_ = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
snake_case_ = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(snake_case , snake_case , stride=snake_case , activation=config.hidden_act ) , *[layer(snake_case , snake_case , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def a ( self , snake_case ):
snake_case_ = input
for layer in self.layers:
snake_case_ = layer(snake_case )
return hidden_state
class lowercase ( nn.Module ):
def __init__( self , snake_case ):
super().__init__()
snake_case_ = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
snake_case , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
snake_case_ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(snake_case , config.depths[1:] ):
self.stages.append(ResNetStage(snake_case , snake_case , snake_case , depth=snake_case ) )
def a ( self , snake_case , snake_case = False , snake_case = True ):
snake_case_ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
snake_case_ = hidden_states + (hidden_state,)
snake_case_ = stage_module(snake_case )
if output_hidden_states:
snake_case_ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=snake_case , hidden_states=snake_case , )
class lowercase ( lowercase_ ):
__SCREAMING_SNAKE_CASE : List[str] = ResNetConfig
__SCREAMING_SNAKE_CASE : Any = '''resnet'''
__SCREAMING_SNAKE_CASE : int = '''pixel_values'''
__SCREAMING_SNAKE_CASE : Tuple = True
def a ( self , snake_case ):
if isinstance(snake_case , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(snake_case , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def a ( self , snake_case , snake_case=False ):
if isinstance(snake_case , snake_case ):
snake_case_ = value
_UpperCAmelCase : Tuple = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
_UpperCAmelCase : Optional[int] = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare ResNet model outputting raw features without any specific head on top.''' , lowercase_ , )
class lowercase ( lowercase_ ):
def __init__( self , snake_case ):
super().__init__(snake_case )
snake_case_ = config
snake_case_ = ResNetEmbeddings(snake_case )
snake_case_ = ResNetEncoder(snake_case )
snake_case_ = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(snake_case )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def a ( self , snake_case , snake_case = None , snake_case = None ):
snake_case_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict
snake_case_ = self.embedder(snake_case )
snake_case_ = self.encoder(
snake_case , output_hidden_states=snake_case , return_dict=snake_case )
snake_case_ = encoder_outputs[0]
snake_case_ = self.pooler(snake_case )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=snake_case , pooler_output=snake_case , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , lowercase_ , )
class lowercase ( lowercase_ ):
def __init__( self , snake_case ):
super().__init__(snake_case )
snake_case_ = config.num_labels
snake_case_ = ResNetModel(snake_case )
# classification head
snake_case_ = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(snake_case )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def a ( self , snake_case = None , snake_case = None , snake_case = None , snake_case = None , ):
snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict
snake_case_ = self.resnet(snake_case , output_hidden_states=snake_case , return_dict=snake_case )
snake_case_ = outputs.pooler_output if return_dict else outputs[1]
snake_case_ = self.classifier(snake_case )
snake_case_ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
snake_case_ = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
snake_case_ = 'single_label_classification'
else:
snake_case_ = 'multi_label_classification'
if self.config.problem_type == "regression":
snake_case_ = MSELoss()
if self.num_labels == 1:
snake_case_ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
snake_case_ = loss_fct(snake_case , snake_case )
elif self.config.problem_type == "single_label_classification":
snake_case_ = CrossEntropyLoss()
snake_case_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
snake_case_ = BCEWithLogitsLoss()
snake_case_ = loss_fct(snake_case , snake_case )
if not return_dict:
snake_case_ = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=snake_case , logits=snake_case , hidden_states=outputs.hidden_states )
@add_start_docstrings(
'''
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
''' , lowercase_ , )
class lowercase ( lowercase_ , lowercase_ ):
def __init__( self , snake_case ):
super().__init__(snake_case )
super()._init_backbone(snake_case )
snake_case_ = [config.embedding_size] + config.hidden_sizes
snake_case_ = ResNetEmbeddings(snake_case )
snake_case_ = ResNetEncoder(snake_case )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(snake_case )
@replace_return_docstrings(output_type=snake_case , config_class=_CONFIG_FOR_DOC )
def a ( self , snake_case , snake_case = None , snake_case = None ):
snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict
snake_case_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case_ = self.embedder(snake_case )
snake_case_ = self.encoder(snake_case , output_hidden_states=snake_case , return_dict=snake_case )
snake_case_ = outputs.hidden_states
snake_case_ = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
snake_case_ = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=snake_case , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=snake_case , )
| 285
| 0
|
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below n (Project Euler problem 1)."""
    a = 3
    result = 0
    while a < n:
        # Multiples of 15 are multiples of 3, so the single test below already
        # counts them exactly once; no correction branch is needed.
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
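
# Cross-check with the closed form (inclusion-exclusion): for n = 1000,
# S(k) = k * m * (m + 1) / 2 with m = (n - 1) // k gives
# S(3) + S(5) - S(15) = 166833 + 99500 - 33165 = 233168, matching solution().
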
if __name__ == "__main__":
print(f"""{solution() = }""")
| 353
|
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch(gpta_checkpoint_path, gpta_config_file, pytorch_dump_folder_path):
    # Construct the model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file)
    model = GPTaModel(config)

    # Load weights from numpy
    load_tf_weights_in_gpta(model, config, gpta_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
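
# Example invocation (script name and paths are placeholders; the flags match
# the argparse definitions below):
#   python convert_gpt2_checkpoint.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --pytorch_dump_folder_path /path/to/output \
#       --gpt2_config_file /path/to/config.json
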
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
_lowerCamelCase = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 67
| 0
|
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    """Unconditional latent-diffusion pipeline: VQ-VAE decoder + UNet + DDIM scheduler."""

    def __init__(self, vqvae: VQModel, unet: UNetaDModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
@torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
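
# Usage sketch (hub checkpoint id assumed/illustrative):
#   pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#   images = pipe(batch_size=1, num_inference_steps=50).images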
| 7
|
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
lowercase_ = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 7
| 1
|
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
UpperCamelCase : List[Any] = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
UpperCamelCase : Any = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""
UpperCamelCase : List[str] = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types()) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
] , )
    def _get_feature_types(self):
"""simple docstring"""
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('float')),
"references": datasets.Sequence(datasets.Value('float')),
}
else:
return {
"predictions": datasets.Value('float'),
"references": datasets.Value('float'),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        """Compute the (root) mean squared error between predictions and references."""
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
return {"mse": mse}
| 370
|
'''simple docstring'''
from __future__ import annotations
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """Return the P-series 1, 1/2^p, 1/3^p, ..., 1/n^p as strings."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase : Optional[int] = int(input("""Enter the last number (nth term) of the P-Series"""))
UpperCamelCase : List[Any] = int(input("""Enter the power for P-Series"""))
print("""Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p""")
print(p_series(nth_term, power))
| 345
| 0
|
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch for this alignment, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
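
# Trace on the demo below (text="ABAABA", pattern="AB"): full matches occur at
# shifts 0 and 3, so bad_character_heuristic() returns [0, 3].
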
lowerCAmelCase__ = '''ABAABA'''
lowerCAmelCase__ = '''AB'''
lowerCAmelCase__ = BoyerMooreSearch(text, pattern)
lowerCAmelCase__ = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
| 72
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase ( UpperCamelCase__ , unittest.TestCase ):
__lowercase = KandinskyVaaPipeline
__lowercase = [
"""image_embeds""",
"""negative_image_embeds""",
]
__lowercase = ["""image_embeds""", """negative_image_embeds"""]
__lowercase = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
__lowercase = False
@property
def UpperCAmelCase_ ( self :Optional[Any] )-> str:
return 32
@property
def UpperCAmelCase_ ( self :int )-> List[Any]:
return 32
@property
def UpperCAmelCase_ ( self :Optional[Any] )-> Dict:
return self.time_input_dim
@property
def UpperCAmelCase_ ( self :Any )-> Union[str, Any]:
return self.time_input_dim * 4
@property
def UpperCAmelCase_ ( self :Any )-> List[Any]:
return 1_00
@property
def UpperCAmelCase_ ( self :Union[str, Any] )-> Union[str, Any]:
torch.manual_seed(0 )
A__ = {
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
A__ = UNetaDConditionModel(**lowercase_ )
return model
@property
def UpperCAmelCase_ ( self :Any )-> Optional[Any]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCAmelCase_ ( self :Dict )-> Union[str, Any]:
torch.manual_seed(0 )
A__ = VQModel(**self.dummy_movq_kwargs )
return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy"
        )

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "red cat, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
| 237
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowerCAmelCase__ ( unittest.TestCase ):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNetaDModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model
@property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNetaDConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            cross_attention_dim=10,
        )
        return model
@property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNetaDModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet
@slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )

        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )

        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
| 367
|
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
def lowercase ( self : List[str] ):
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_lowerCamelCase )
_snake_case = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_lowerCamelCase )
_snake_case = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_lowerCamelCase ) > 0.5
).float()
_snake_case = (torch.rand((self.batch_size, self.num_labels) , device=_lowerCamelCase ) > 0.5).long()
_snake_case = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase ( self : Optional[Any] ):
_snake_case = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
_snake_case = self.num_queries
_snake_case = self.num_labels
_snake_case = [1, 1, 1, 1]
_snake_case = self.num_channels
_snake_case = 64
_snake_case = 128
_snake_case = self.hidden_dim
_snake_case = self.hidden_dim
_snake_case = self.hidden_dim
return config
def lowercase ( self : Any ):
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case = self.prepare_config_and_inputs()
_snake_case = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def lowercase ( self : Union[str, Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : int ):
_snake_case = output.encoder_hidden_states
_snake_case = output.pixel_decoder_hidden_states
_snake_case = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_lowerCamelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_lowerCamelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_lowerCamelCase ) , config.decoder_layers )
def lowercase ( self : List[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Any , _lowerCamelCase : List[Any] , _lowerCamelCase : Dict=False ):
with torch.no_grad():
_snake_case = MaskaFormerModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_snake_case = model(pixel_values=_lowerCamelCase , pixel_mask=_lowerCamelCase )
_snake_case = model(_lowerCamelCase , output_hidden_states=_lowerCamelCase )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_lowerCamelCase , _lowerCamelCase )
def lowercase ( self : int , _lowerCamelCase : Dict , _lowerCamelCase : List[Any] , _lowerCamelCase : Any , _lowerCamelCase : str , _lowerCamelCase : Union[str, Any] ):
_snake_case = MaskaFormerForUniversalSegmentation(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
def comm_check_on_output(_lowerCamelCase : List[str] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_snake_case = model(pixel_values=_lowerCamelCase , pixel_mask=_lowerCamelCase )
_snake_case = model(_lowerCamelCase )
comm_check_on_output(_lowerCamelCase )
_snake_case = model(
pixel_values=_lowerCamelCase , pixel_mask=_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase )
comm_check_on_output(_lowerCamelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
def lowercase ( self : Tuple ):
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_lowerCamelCase , **_lowerCamelCase , output_hidden_states=_lowerCamelCase )
def lowercase ( self : Any ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_lowerCamelCase )
@unittest.skip(reason='''Mask2Former does not use inputs_embeds''' )
def lowercase ( self : Optional[int] ):
pass
@unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''' )
def lowercase ( self : Dict ):
pass
@unittest.skip(reason='''Mask2Former is not a generative model''' )
def lowercase ( self : int ):
pass
@unittest.skip(reason='''Mask2Former does not use token embeddings''' )
def lowercase ( self : List[str] ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def lowercase ( self : Optional[int] ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase ( self : str ):
pass
def lowercase ( self : Optional[int] ):
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(_lowerCamelCase )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
@slow
def lowercase ( self : Optional[int] ):
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_snake_case = MaskaFormerModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def lowercase ( self : List[Any] ):
_snake_case = (self.model_tester.min_size,) * 2
_snake_case = {
'''pixel_values''': torch.randn((2, 3, *size) , device=_lowerCamelCase ),
'''mask_labels''': torch.randn((2, 10, *size) , device=_lowerCamelCase ),
'''class_labels''': torch.zeros(2 , 10 , device=_lowerCamelCase ).long(),
}
_snake_case = self.model_tester.get_config()
_snake_case = MaskaFormerForUniversalSegmentation(_lowerCamelCase ).to(_lowerCamelCase )
_snake_case = model(**_lowerCamelCase )
self.assertTrue(outputs.loss is not None )
def lowercase ( self : Union[str, Any] ):
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_lowerCamelCase , **_lowerCamelCase , output_hidden_states=_lowerCamelCase )
def lowercase ( self : str ):
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(_lowerCamelCase ).to(_lowerCamelCase )
_snake_case = model(**_lowerCamelCase , output_attentions=_lowerCamelCase )
self.assertTrue(outputs.attentions is not None )
def lowercase ( self : str ):
if not self.model_tester.is_training:
return
_snake_case = self.all_model_classes[1]
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs()
_snake_case = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
_snake_case = model(_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase ).loss
loss.backward()
def lowercase ( self : Optional[int] ):
_snake_case = self.all_model_classes[1]
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs()
_snake_case = True
_snake_case = True
_snake_case = model_class(_lowerCamelCase ).to(_lowerCamelCase )
model.train()
_snake_case = model(_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase )
_snake_case = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_snake_case = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_snake_case = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_snake_case = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_lowerCamelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
UpperCAmelCase__ = 1e-4
def _UpperCAmelCase ( ) -> Tuple:
_snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowercase ( self : Optional[Any] ):
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowercase ( self : int ):
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowercase ( self : Any ):
_snake_case = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_lowerCamelCase )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
_snake_case = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowerCamelCase , (1, 3, 384, 384) )
with torch.no_grad():
_snake_case = model(**_lowerCamelCase )
_snake_case = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(_lowerCamelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
_snake_case = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(_lowerCamelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
_snake_case = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(_lowerCamelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
def lowercase ( self : str ):
_snake_case = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowerCamelCase ).eval()
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
_snake_case = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowerCamelCase , (1, 3, 384, 384) )
with torch.no_grad():
_snake_case = model(**_lowerCamelCase )
# masks_queries_logits
_snake_case = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
_snake_case = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
_snake_case = torch.tensor(_lowerCamelCase ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
# class_queries_logits
_snake_case = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
_snake_case = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
def lowercase ( self : Optional[int] ):
_snake_case = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowerCamelCase ).eval()
_snake_case = self.default_image_processor
_snake_case = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='''pt''' , )
_snake_case = inputs['''pixel_values'''].to(_lowerCamelCase )
_snake_case = [el.to(_lowerCamelCase ) for el in inputs['''mask_labels''']]
_snake_case = [el.to(_lowerCamelCase ) for el in inputs['''class_labels''']]
with torch.no_grad():
_snake_case = model(**_lowerCamelCase )
self.assertTrue(outputs.loss is not None )
| 40
| 0
|
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))
    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(
        self,
        clip_input,
        params: dict = None,
    ):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
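# Usage sketch (illustrative, not part of the original module): given a batch of
# CLIP-preprocessed images of shape (batch, 3, 224, 224), the checker returns one
# boolean per image indicating whether an NSFW concept was detected:
#
#     checker = FlaxStableDiffusionSafetyChecker(CLIPConfig())
#     has_nsfw = checker(images, params=checker.params)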
| 78
|
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
lowercase : Tuple = """"""
if version.parse(importlib_metadata.version("""jiwer""")) < version.parse("""2.3.0"""):
class A__ ( tr.AbstractTransform ):
"""simple docstring"""
def __init__( self , lowercase = " ") -> Tuple:
'''simple docstring'''
a__ : Tuple = sentence_delimiter
def __lowercase ( self , lowercase) -> Optional[int]:
'''simple docstring'''
return list(lowercase)
def __lowercase ( self , lowercase) -> Dict:
'''simple docstring'''
a__ : Tuple = []
for sent_idx, sentence in enumerate(lowercase):
chars.extend(self.process_string(lowercase))
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowercase) - 1:
chars.append(self.sentence_delimiter)
return chars
    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
lowercase : List[Any] = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
_DESCRIPTION = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""
_KWARGS_DESCRIPTION = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
| 99
| 0
|
"""simple docstring"""
def power(base: int, exponent: int) -> float:
    return base * power(base, (exponent - 1)) if exponent else 1
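# Illustrative trace of the recursion above:
# power(2, 3) -> 2 * power(2, 2) -> 2 * 2 * power(2, 1)
# -> 2 * 2 * 2 * power(2, 0) -> 2 * 2 * 2 * 1 = 8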
if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
| 362
|
"""simple docstring"""
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    """Return the sum of the factorials of the digits of ``n``."""
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    """Sum every number that equals the sum of the factorials of its digits.

    An n-digit number is at least 10**(n - 1) while its digit-factorial sum is
    at most n * 9!; since 8 * 9! = 2_903_040 < 10**7, no solution has more than
    7 digits, so 7 * 9! + 1 is a safe search limit.
    """
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
if __name__ == "__main__":
print(f'{solution() = }')
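# Sanity check (illustrative): 145 = 1! + 4! + 5!, and the only other such
# number is 40585, so solution() should return 145 + 40585 = 40730.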
| 253
| 0
|
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """Read the given file and return its contents as a string of bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def add_key_to_lexicon(lexicon: dict[str, str], curr_string: str, index: int, last_match_id: str) -> None:
    """Add the new strings (curr_string + "0", curr_string + "1") to the lexicon."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]
def compress_data(data_bits: str) -> str:
    """Compress the given string of bits with the Lempel-Ziv algorithm and
    return the result."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result
def add_file_length(source_path: str, compressed: str) -> str:
    """Prepend the size of the original file (in bits) to the compressed string."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the given string of bits, padded to whole bytes, to the given file."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def compress(source_path: str, destination_path: str) -> None:
    """Read the source file, compress it and write the result to the destination."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
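# Example invocation from a shell (file names are illustrative):
#     python lempel_ziv.py source.txt compressed.lz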
| 90
|
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)
class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # mean-pool the token embeddings, weighted by the attention mask
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
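# Minimal usage sketch (illustrative; input_ids and attention_mask are assumed
# to come from an XLM-R tokenizer):
#     model = MultilingualCLIP(MCLIPConfig())
#     projected, token_embeddings = model.forward(input_ids, attention_mask)
#     # projected has shape (batch, numDims): mean-pooled sentence embeddings
#     # mapped into the CLIP image-embedding space.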
| 67
| 0
|
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=1,
            new_decoder_architecture=True,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , ) -> Tuple:
UpperCAmelCase_ : Dict = True
UpperCAmelCase_ : List[Any] = FalconModel(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : int = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )
UpperCAmelCase_ : Optional[int] = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , )
UpperCAmelCase_ : str = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , ) -> int:
UpperCAmelCase_ : Union[str, Any] = FalconForCausalLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , ) -> Tuple:
UpperCAmelCase_ : str = True
UpperCAmelCase_ : Optional[int] = True
UpperCAmelCase_ : List[Any] = FalconForCausalLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
# first forward pass
UpperCAmelCase_ : Union[str, Any] = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , use_cache=lowerCAmelCase_ , )
UpperCAmelCase_ : Tuple = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase_ : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase_ : List[str] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCAmelCase_ : str = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase_ : str = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase_ : Dict = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , )["hidden_states"][0]
UpperCAmelCase_ : List[str] = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , )["hidden_states"][0]
# select random slice
UpperCAmelCase_ : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase_ : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase_ : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_position_embedding_types(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : List[Any] = 3
UpperCAmelCase_ : Optional[Any] = input_dict["input_ids"]
UpperCAmelCase_ : Any = input_ids.ne(1 ).to(lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase_ : List[Any] = FalconForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : int = 3
UpperCAmelCase_ : Optional[Any] = "single_label_classification"
UpperCAmelCase_ : int = input_dict["input_ids"]
UpperCAmelCase_ : int = input_ids.ne(1 ).to(lowerCAmelCase_ )
UpperCAmelCase_ : Dict = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase_ : List[str] = FalconForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[int] = input_dict["input_ids"]
UpperCAmelCase_ : Union[str, Any] = FalconForCausalLM(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(lowerCAmelCase_ , use_cache=lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = input_ids.shape[0]
UpperCAmelCase_ : int = model._convert_to_rw_cache(result.past_key_values )
UpperCAmelCase_ : Dict = model._convert_cache_to_standard_format(lowerCAmelCase_ , lowerCAmelCase_ )
for layer in range(len(lowerCAmelCase_ ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[int] = 3
UpperCAmelCase_ : str = "multi_label_classification"
UpperCAmelCase_ : Union[str, Any] = input_dict["input_ids"]
UpperCAmelCase_ : Union[str, Any] = input_ids.ne(1 ).to(lowerCAmelCase_ )
UpperCAmelCase_ : Dict = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase_ : Any = FalconForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : Optional[int] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
# Falcon can have different numbers of KV-heads than the number of query heads, so we need
# to override this test to use the right head counts.
for model_class in self.all_generative_model_classes:
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(lowerCAmelCase_ , "use_cache" ):
return
UpperCAmelCase_ : int = model_class(lowerCAmelCase_ ).to(lowerCAmelCase_ )
if "use_cache" not in inputs:
UpperCAmelCase_ : Tuple = True
UpperCAmelCase_ : Dict = model(**lowerCAmelCase_ )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
UpperCAmelCase_ : str = (
getattr(lowerCAmelCase_ , "decoder_layers" , lowerCAmelCase_ )
or getattr(lowerCAmelCase_ , "num_decoder_layers" , lowerCAmelCase_ )
or config.num_hidden_layers
)
UpperCAmelCase_ : int = getattr(lowerCAmelCase_ , "num_kv_heads" , config.num_attention_heads )
UpperCAmelCase_ : Optional[int] = getattr(lowerCAmelCase_ , "d_model" , config.hidden_size )
UpperCAmelCase_ : Tuple = embed_dim // num_attention_heads
UpperCAmelCase_ : Optional[int] = outputs["past_key_values"]
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = inputs["input_ids"].shape
for i in range(lowerCAmelCase_ ):
if config.new_decoder_architecture:
UpperCAmelCase_ : Union[str, Any] = config.num_attention_heads
elif config.multi_query:
UpperCAmelCase_ : Dict = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class UpperCamelCase_ (unittest.TestCase ):
@slow
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b" )
UpperCAmelCase_ : Any = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b" )
model.eval()
model.to(lowerCAmelCase_ )
UpperCAmelCase_ : Any = tokenizer("My favorite food is" , return_tensors="pt" ).to(lowerCAmelCase_ )
UpperCAmelCase_ : str = (
"My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
)
UpperCAmelCase_ : List[Any] = model.generate(**lowerCAmelCase_ , do_sample=lowerCAmelCase_ , max_new_tokens=19 )
UpperCAmelCase_ : Optional[Any] = tokenizer.batch_decode(lowerCAmelCase_ )[0]
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
UpperCAmelCase_ : Dict = FalconForCausalLM.from_pretrained(lowerCAmelCase_ )
model.eval()
model.to(lowerCAmelCase_ )
UpperCAmelCase_ : str = tokenizer("My favorite food is" , return_tensors="pt" ).to(lowerCAmelCase_ )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**lowerCAmelCase_ , do_sample=lowerCAmelCase_ , max_new_tokens=4 )
model.generate(**lowerCAmelCase_ , do_sample=lowerCAmelCase_ , max_new_tokens=4 )
model.generate(**lowerCAmelCase_ , num_beams=2 , max_new_tokens=4 )
@slow
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = FalconForCausalLM.from_pretrained(lowerCAmelCase_ )
model.eval()
model.to(device=lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = tokenizer("My favorite food is" , return_tensors="pt" ).to(lowerCAmelCase_ )
# Test results are the same with and without cache
UpperCAmelCase_ : Optional[Any] = model.generate(**lowerCAmelCase_ , do_sample=lowerCAmelCase_ , max_new_tokens=20 , use_cache=lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = model.generate(**lowerCAmelCase_ , do_sample=lowerCAmelCase_ , max_new_tokens=20 , use_cache=lowerCAmelCase_ )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
| 253
|
"""simple docstring"""
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    """Return the sum of the factorials of the digits of ``n``."""
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    """Sum every number that equals the sum of the factorials of its digits.

    An n-digit number is at least 10**(n - 1) while its digit-factorial sum is
    at most n * 9!; since 8 * 9! = 2_903_040 < 10**7, no solution has more than
    7 digits, so 7 * 9! + 1 is a safe search limit.
    """
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
if __name__ == "__main__":
print(f'{solution() = }')
| 253
| 1
|
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
def UpperCAmelCase_ ( self ) -> List[Any]:
A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ : Optional[Any] = None
if self.use_input_mask:
A_ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
A_ : Tuple = None
A_ : Dict = None
A_ : Tuple = None
if self.use_labels:
A_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
A_ : str = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self ) -> int:
(
A_
) : Tuple = self.prepare_config_and_inputs()
A_ : Any = True
A_ : Tuple = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A_ : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
A_ : Optional[int] = TFEsmModel(config=lowerCamelCase_ )
A_ : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
A_ : Dict = model(lowerCamelCase_ )
A_ : Tuple = [input_ids, input_mask]
A_ : Any = model(lowerCamelCase_ )
A_ : Union[str, Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) -> List[Any]:
A_ : List[Any] = True
A_ : Any = TFEsmModel(config=lowerCamelCase_ )
A_ : Dict = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""encoder_hidden_states""": encoder_hidden_states,
"""encoder_attention_mask""": encoder_attention_mask,
}
A_ : Dict = model(lowerCamelCase_ )
A_ : int = [input_ids, input_mask]
A_ : List[Any] = model(lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ )
# Also check the case where encoder outputs are not passed
A_ : List[Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
A_ : Optional[Any] = TFEsmForMaskedLM(config=lowerCamelCase_ )
A_ : int = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str:
A_ : str = self.num_labels
A_ : List[str] = TFEsmForTokenClassification(config=lowerCamelCase_ )
A_ : int = {"""input_ids""": input_ids, """attention_mask""": input_mask}
A_ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self ) -> List[Any]:
A_ : Any = self.prepare_config_and_inputs()
(
A_
) : Optional[Any] = config_and_inputs
A_ : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]

        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 344
|
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size, max_position_embeddings=args.max_target_positions, num_layers=args.decoder_layers, attention_heads=args.decoder_attention_heads, ffn_dim=args.decoder_ffn_embed_dim, d_model=args.decoder_embed_dim, layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="gelu", scale_embedding=not args.no_scale_embedding, tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
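# Example invocation (paths are placeholders and the script file name is an
# assumption, not taken from this file):
#   python convert_xglm_original_ckpt_to_trfms.py /path/to/model.pt ./xglm-dump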
| 345
| 0
|
'''simple docstring'''
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
'''bart''': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''bert''': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-base-cased-finetuned-mrpc''': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''dpr''': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''gpt2''': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlnet''': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm''': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm-roberta''': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''transfo-xl''': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''openai-gpt''': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''roberta''': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''layoutlm''': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''roberta-large-mnli''': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''camembert''': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''flaubert''': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert''': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert-base-distilled-squad''': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert''': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert-visual-feature-encoder''': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''ctrl''': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''albert''': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''t5''': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''electra''': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''wav2vec2''': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )

    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")


def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_dump_path''', default=None, type=str, required=True, help='''Path to the output Tensorflow dump file.'''
)
parser.add_argument(
'''--model_type''',
default=None,
type=str,
help=(
F"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and "
'''convert all the models from AWS.'''
),
)
parser.add_argument(
'''--pytorch_checkpoint_path''',
default=None,
type=str,
help=(
'''Path to the PyTorch checkpoint path or shortcut name to download from AWS. '''
'''If not given, will download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
help=(
'''The config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture. If not given and '''
'''--pytorch_checkpoint_path is not given or is a shortcut name '''
'''use the configuration associated to the shortcut name on the AWS'''
),
)
parser.add_argument(
'''--compare_with_pt_model''', action='''store_true''', help='''Compare Tensorflow and PyTorch model predictions.'''
)
parser.add_argument(
'''--use_cached_models''',
action='''store_true''',
help='''Use cached models if possible instead of updating to latest checkpoint versions.''',
)
parser.add_argument(
'''--remove_cached_files''',
action='''store_true''',
help='''Remove pytorch models after conversion (save memory when converting in batches).''',
)
parser.add_argument('''--only_convert_finetuned_models''', action='''store_true''', help='''Only convert finetuned models.''')
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 135
|
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
    # See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig(PretrainedConfig):
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=50265, max_position_embeddings=1024,
        encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16,
        decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0,
        activation_function="gelu", d_model=1024,
        dropout=0.1, attention_dropout=0.0, activation_dropout=0.0,
        init_std=0.02, classifier_dropout=0.0,
        scale_embedding=False, use_cache=True, num_labels=3,
        pad_token_id=1, bos_token_id=0, eos_token_id=2,
        is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            num_labels=num_labels, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
class BartOnnxConfig(OnnxSeqaSeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seqaseq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeqaSeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
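# Illustrative export-prep sketch (added for illustration; `tokenizer` is an
# assumed BART tokenizer, not defined in this file):
#   onnx_config = BartOnnxConfig(BartConfig(), task="default")
#   dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)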
| 135
| 1
|
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-char bit string from big endian to little endian."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """Format a non-negative int as 8 little-endian hex digits."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """Pad the message to a bit string whose length is a multiple of 512."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the bit string into 512-char blocks of 16 32-bit words each."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_32(i: int) -> int:
    """Bitwise NOT restricted to 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32
def left_rotate_32(i: int, shift: int) -> int:
    """Rotate the bits of a 32-bit int left by `shift` places."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
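# Worked example: left_rotate_32(1, 4) == 16, and a high bit wraps around to
# the low end, e.g. left_rotate_32(2**31, 1) == 1.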
def md5_me(message: bytes) -> bytes:
    """Return the 32-char little-endian hex MD5 digest of `message`."""
    # Convert to bit string, add padding and append message length
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
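# A minimal cross-check (added for illustration, not part of the original
# module): the pure-Python digest should agree with the standard library's MD5.
if __name__ == "__main__":
    import hashlib

    assert md5_me(b"hello world") == hashlib.md5(b"hello world").hexdigest().encode("utf-8")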
| 38
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
| 40
| 0
|
"""simple docstring"""
def all_characters_unique(input_str: str) -> bool:
    """Return True if every character in `input_str` occurs only once."""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)

        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
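# Quick illustrative check: "abcd" has four distinct characters, while "abca"
# repeats 'a'.
#   all_characters_unique("abcd")  # True
#   all_characters_unique("abca")  # False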
| 351
|
"""simple docstring"""
UNIT_SYMBOL = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 12,
"""Pm""": 15,
"""Em""": 18,
"""Zm""": 21,
"""Ym""": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
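# Worked example: "meter" -> "kilometer" shifts the exponent by -3, so
#   length_conversion(4, "meter", "kilometer")  # 0.004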
if __name__ == "__main__":
from doctest import testmod
testmod()
| 291
| 0
|
'''simple docstring'''
def kinetic_energy(mass: float, velocity: float) -> float:
    """Compute kinetic energy 0.5 * m * v**2; velocity may be negative."""
    if mass < 0:
        raise ValueError('The mass of a body cannot be negative')
    return 0.5 * mass * abs(velocity) * abs(velocity)
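# Worked example: a 10 kg body moving at 10 m/s carries
# 0.5 * 10 * 10**2 = 500 J, so kinetic_energy(10, 10) == 500.0.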
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 28
|
import math


def insertion_sort(array, start=0, end=0):
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array, index, heap_size):  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array):
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)

    return array


def median_of_3(array, first_index, middle_index, last_index):
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array, low, high, pivot):
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array):
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array, start, end, size_threshold, max_depth):
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
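# Usage sketch: sort() dispatches between insertion sort, heapsort and
# quicksort internally, e.g.
#   sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])
#   -> [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]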
| 253
| 0
|
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]

    def __init__(self, feature_size: int = 1, sampling_rate: int = 24_000, padding_value: float = 0.0, chunk_length_s: float = None, overlap: float = None, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))
    def __call__(
        self,
        raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)

        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
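# Illustrative call (a sketch; `raw_audio` stands for an assumed 1-D float
# numpy array, not defined here):
#   feature_extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24_000)
#   inputs = feature_extractor(raw_audio, sampling_rate=24_000, return_tensors="pt")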
| 90
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_efficientnet': [
'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientNetConfig',
'EfficientNetOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientNetForImageClassification',
'EfficientNetModel',
'EfficientNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 90
| 1
|
from jiwer import compute_measures
import datasets
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n    references: List of references for each speech input.\n    predictions: List of transcriptions to score.\n    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n    (float): the word error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> wer = datasets.load_metric("wer")\n    >>> wer_score = wer.compute(predictions=predictions, references=references)\n    >>> print(wer_score)\n    0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
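# Worked example (matches the docstring above): "this is the prediction" vs
# "this is the reference" contributes 1 substitution over 4 reference words;
# "there is an other sample" vs "there is another one" contributes
# 2 substitutions + 1 insertion over 4 reference words, so
# WER = (1 + 3) / (4 + 4) = 0.5.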
| 253
|
import math
def A_ ( a , a = 0 , a = 0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = end or len(a )
for i in range(a , a ):
SCREAMING_SNAKE_CASE_ : List[Any] = i
SCREAMING_SNAKE_CASE_ : Optional[Any] = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
SCREAMING_SNAKE_CASE_ : Tuple = array[temp_index - 1]
temp_index -= 1
SCREAMING_SNAKE_CASE_ : str = temp_index_value
return array
def A_ ( a , a , a ): # Max Heap
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = index
SCREAMING_SNAKE_CASE_ : str = 2 * index + 1 # Left Node
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
SCREAMING_SNAKE_CASE_ : Dict = left_index
if right_index < heap_size and array[largest] < array[right_index]:
SCREAMING_SNAKE_CASE_ : Any = right_index
if largest != index:
        array[largest], array[index] = array[index], array[largest]
heapify(a , a , a )
def A_ ( a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = len(a )
for i in range(n // 2 , -1 , -1 ):
heapify(a , a , a )
for i in range(n - 1 , 0 , -1 ):
        array[0], array[i] = array[i], array[0]
heapify(a , 0 , a )
return array
def A_ ( a , a , a , a ):
"""simple docstring"""
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def A_ ( a , a , a , a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = low
SCREAMING_SNAKE_CASE_ : Tuple = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
        array[i], array[j] = array[j], array[i]
i += 1
def A_ ( a ):
"""simple docstring"""
if len(a ) == 0:
return array
    SCREAMING_SNAKE_CASE_ : Any = 2 * math.ceil(math.log2(len(a ) ) )
SCREAMING_SNAKE_CASE_ : int = 1_6
return intro_sort(a , 0 , len(a ) , a , a )
def A_ ( a , a , a , a , a ):
"""simple docstring"""
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(a )
max_depth -= 1
SCREAMING_SNAKE_CASE_ : Optional[int] = median_of_a(a , a , start + ((end - start) // 2) + 1 , end - 1 )
SCREAMING_SNAKE_CASE_ : Dict = partition(a , a , a , a )
intro_sort(a , a , a , a , a )
SCREAMING_SNAKE_CASE_ : List[Any] = p
return insertion_sort(a , a , a )
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase : List[str] = input('Enter numbers separated by a comma : ').strip()
lowerCAmelCase : Optional[Any] = [float(item) for item in user_input.split(',')]
print(sort(unsorted))
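# The block above is introsort: quicksort that switches to heap sort once the
# recursion depth exceeds 2 * ceil(log2(n)), and finishes ranges smaller than the
# size threshold (16) with insertion sort, giving O(n log n) worst-case time. The
# pivot is the median of the first, middle, and last elements (median-of-three).
# A usage sketch with the `sort` entry point from the `__main__` block above:
#
#   sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])
#   -> [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]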
| 253
| 1
|
'''simple docstring'''
import random
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Tuple:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = a[left_index]
_UpperCAmelCase : Optional[int] = left_index + 1
for j in range(left_index + 1 , lowerCAmelCase_ ):
if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
i += 1
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
return i - 1
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> int:
'''simple docstring'''
if left < right:
_UpperCAmelCase : Tuple = random.randint(lowerCAmelCase_ , right - 1 )
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
_UpperCAmelCase : Tuple = partition(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
quick_sort_random(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # recursive quicksort to the left of the pivot point
quick_sort_random(
lowerCAmelCase_ , pivot_index + 1 , lowerCAmelCase_ ) # recursive quicksort to the right of the pivot point
def snake_case_ ( )-> Dict:
'''simple docstring'''
_UpperCAmelCase : str = input("""Enter numbers separated by a comma:\n""" ).strip()
_UpperCAmelCase : int = [int(lowerCAmelCase_ ) for item in user_input.split(""",""" )]
quick_sort_random(lowerCAmelCase_ , 0 , len(lowerCAmelCase_ ) )
print(lowerCAmelCase_ )
if __name__ == "__main__":
main()
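# A quick sanity check for the randomized quicksort above, assuming the two
# functions keep their original names `partition` and `quick_sort_random`
# (the upper bound is exclusive, as in the `main` call above):
#
#   data = [9, 1, 8, 2, 7, 3]
#   quick_sort_random(data, 0, len(data))
#   assert data == [1, 2, 3, 7, 8, 9]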
| 353
|
'''simple docstring'''
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 0 , lowerCAmelCase_ = 0 )-> int:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = right or len(lowerCAmelCase_ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(lowerCAmelCase_ , lowerCAmelCase_ , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
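# The recursion above compares one element from each end of the list per call, so
# it needs at most ceil(n / 2) recursive steps. Usage, assuming the original
# name `search`:
#
#   search([1, 2, 3, 4, 5], 4)  ->  3
#   search([1, 2, 3, 4, 5], 6)  -> -1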
| 349
| 0
|
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
__A = None
__A = logging.get_logger(__name__)
__A = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__A = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''',
},
}
# TODO(PVP) - this should be removed in Transformers v5
__A = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
class _snake_case ( a__ ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = ["input_ids", "attention_mask"]
snake_case__ = TaTokenizer
snake_case__ = []
def __init__( self : List[Any] , UpperCAmelCase : List[str]=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Optional[Any]="</s>" , UpperCAmelCase : Union[str, Any]="<unk>" , UpperCAmelCase : int="<pad>" , UpperCAmelCase : str=100 , UpperCAmelCase : Optional[int]=None , **UpperCAmelCase : Dict , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
__lowerCamelCase : Optional[Any] = [F"""<extra_id_{i}>""" for i in range(UpperCAmelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
__lowerCamelCase : int = len(set(filter(lambda UpperCAmelCase : bool("extra_id_" in str(UpperCAmelCase ) ) , UpperCAmelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
super().__init__(
UpperCAmelCase , tokenizer_file=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , extra_ids=UpperCAmelCase , additional_special_tokens=UpperCAmelCase , **UpperCAmelCase , )
__lowerCamelCase : Optional[Any] = vocab_file
__lowerCamelCase : int = False if not self.vocab_file else True
__lowerCamelCase : Tuple = extra_ids
@staticmethod
def lowerCamelCase__ ( UpperCAmelCase : Any , UpperCAmelCase : Dict , UpperCAmelCase : List[str] ):
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
__lowerCamelCase : int = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
F""" {pretrained_model_name_or_path} automatically truncating your input to"""
F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , UpperCAmelCase , )
return max_model_length
def lowerCamelCase__ ( self : List[str] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowerCamelCase : Union[str, Any] = os.path.join(
UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ):
copyfile(self.vocab_file , UpperCAmelCase )
logger.info(F"""Copy vocab file to {out_vocab_file}""" )
return (out_vocab_file,)
def lowerCamelCase__ ( self : Any , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ):
__lowerCamelCase : Union[str, Any] = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
__lowerCamelCase : Any = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def lowerCamelCase__ ( self : Union[str, Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ):
__lowerCamelCase : Union[str, Any] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def lowerCamelCase__ ( self : Dict ):
return list(
set(filter(lambda UpperCAmelCase : bool(re.search(r"<extra_id_\d+>" , UpperCAmelCase ) ) is not None , self.additional_special_tokens ) ) )
def lowerCamelCase__ ( self : Optional[Any] ):
return [self.convert_tokens_to_ids(UpperCAmelCase ) for token in self.get_sentinel_tokens()]
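# With the default extra_ids=100 in the constructor above, the tokenizer carries
# 100 sentinel tokens (<extra_id_0> ... <extra_id_99>) in
# `additional_special_tokens`. The last two methods recover them: the first
# filters them back out with the <extra_id_\d+> regex (`get_sentinel_tokens` in
# the original source), and the second maps them to vocabulary ids via
# `convert_tokens_to_ids`.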
| 135
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class _snake_case ( a__ ):
snake_case__ = "camembert"
def __init__( self : Union[str, Any] , UpperCAmelCase : List[Any]=30522 , UpperCAmelCase : Optional[int]=768 , UpperCAmelCase : Union[str, Any]=12 , UpperCAmelCase : Tuple=12 , UpperCAmelCase : Tuple=3072 , UpperCAmelCase : int="gelu" , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Tuple=512 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : int=0.0_2 , UpperCAmelCase : Tuple=1E-12 , UpperCAmelCase : Union[str, Any]=1 , UpperCAmelCase : int=0 , UpperCAmelCase : int=2 , UpperCAmelCase : str="absolute" , UpperCAmelCase : Dict=True , UpperCAmelCase : int=None , **UpperCAmelCase : List[str] , ):
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
__lowerCamelCase : Any = vocab_size
__lowerCamelCase : Optional[int] = hidden_size
__lowerCamelCase : int = num_hidden_layers
__lowerCamelCase : int = num_attention_heads
__lowerCamelCase : int = hidden_act
__lowerCamelCase : Union[str, Any] = intermediate_size
__lowerCamelCase : Optional[int] = hidden_dropout_prob
__lowerCamelCase : List[Any] = attention_probs_dropout_prob
__lowerCamelCase : Dict = max_position_embeddings
__lowerCamelCase : Tuple = type_vocab_size
__lowerCamelCase : Any = initializer_range
__lowerCamelCase : str = layer_norm_eps
__lowerCamelCase : List[Any] = position_embedding_type
__lowerCamelCase : Dict = use_cache
__lowerCamelCase : List[Any] = classifier_dropout
class _snake_case ( a__ ):
@property
def lowerCamelCase__ ( self : int ):
if self.task == "multiple-choice":
__lowerCamelCase : List[str] = {0: "batch", 1: "choice", 2: "sequence"}
else:
__lowerCamelCase : Tuple = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 135
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class SCREAMING_SNAKE_CASE__ (__snake_case ):
__lowerCamelCase : Dict = """roformer"""
def __init__( self , a=5_0000 , a=None , a=768 , a=12 , a=12 , a=3072 , a="gelu" , a=0.1 , a=0.1 , a=1536 , a=2 , a=0.02 , a=1e-12 , a=0 , a=False , a=True , **a , ):
super().__init__(pad_token_id=a , **a)
lowercase__ : Optional[int] = vocab_size
lowercase__ : Tuple = hidden_size if embedding_size is None else embedding_size
lowercase__ : Optional[int] = hidden_size
lowercase__ : Optional[Any] = num_hidden_layers
lowercase__ : Optional[int] = num_attention_heads
lowercase__ : int = hidden_act
lowercase__ : str = intermediate_size
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : Any = attention_probs_dropout_prob
lowercase__ : List[str] = max_position_embeddings
lowercase__ : Union[str, Any] = type_vocab_size
lowercase__ : Dict = initializer_range
lowercase__ : Tuple = layer_norm_eps
lowercase__ : Optional[int] = rotary_value
lowercase__ : Optional[int] = use_cache
class SCREAMING_SNAKE_CASE__ (__snake_case ):
@property
def snake_case_ ( self):
if self.task == "multiple-choice":
lowercase__ : List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowercase__ : Dict = {0: 'batch', 1: 'sequence'}
lowercase__ : Optional[int] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
])
| 216
|
import math
import sys
def snake_case__ ( SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
if number != int(SCREAMING_SNAKE_CASE_ ):
raise ValueError('the value of input must be a natural number' )
if number < 0:
raise ValueError('the value of input must not be a negative number' )
if number == 0:
return 1
lowercase__ : Tuple = [-1] * (number + 1)
lowercase__ : Tuple = 0
for i in range(1 , number + 1 ):
lowercase__ : Tuple = sys.maxsize
lowercase__ : str = int(math.sqrt(SCREAMING_SNAKE_CASE_ ) )
for j in range(1 , root + 1 ):
lowercase__ : List[Any] = 1 + answers[i - (j**2)]
lowercase__ : str = min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase__ : List[str] = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
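# The DP above computes the minimum number of perfect squares summing to the input
# (always at most 4, by Lagrange's four-square theorem). Sample values:
#
#   12 -> 3   (4 + 4 + 4)
#   13 -> 2   (4 + 9)
#   25 -> 1   (25)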
| 216
| 1
|
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __lowerCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase = BertTokenizer
__lowerCamelCase = BertTokenizerFast
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = filter_non_english
def snake_case ( self ):
"""simple docstring"""
super().setUp()
_lowerCAmelCase = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def snake_case ( self , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = """UNwant\u00E9d,running"""
_lowerCAmelCase = """unwanted, running"""
return input_text, output_text
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.tokenizer_class(self.vocab_file )
_lowerCAmelCase = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(_snake_case , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , [9, 6, 7, 12, 10, 11] )
def snake_case ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = self.get_rust_tokenizer()
_lowerCAmelCase = """UNwant\u00E9d,running"""
_lowerCAmelCase = tokenizer.tokenize(_snake_case )
_lowerCAmelCase = rust_tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
_lowerCAmelCase = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
_lowerCAmelCase = rust_tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
self.assertListEqual(_snake_case , _snake_case )
_lowerCAmelCase = self.get_rust_tokenizer()
_lowerCAmelCase = tokenizer.encode(_snake_case )
_lowerCAmelCase = rust_tokenizer.encode(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
# With lower casing
_lowerCAmelCase = self.get_tokenizer(do_lower_case=_snake_case )
_lowerCAmelCase = self.get_rust_tokenizer(do_lower_case=_snake_case )
_lowerCAmelCase = """UNwant\u00E9d,running"""
_lowerCAmelCase = tokenizer.tokenize(_snake_case )
_lowerCAmelCase = rust_tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
_lowerCAmelCase = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
_lowerCAmelCase = rust_tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
self.assertListEqual(_snake_case , _snake_case )
_lowerCAmelCase = self.get_rust_tokenizer()
_lowerCAmelCase = tokenizer.encode(_snake_case )
_lowerCAmelCase = rust_tokenizer.encode(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = BasicTokenizer(do_lower_case=_snake_case )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = BasicTokenizer(do_lower_case=_snake_case , strip_accents=_snake_case )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = BasicTokenizer(do_lower_case=_snake_case , strip_accents=_snake_case )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = BasicTokenizer(do_lower_case=_snake_case )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = BasicTokenizer(do_lower_case=_snake_case )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = BasicTokenizer(do_lower_case=_snake_case , strip_accents=_snake_case )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = BasicTokenizer(do_lower_case=_snake_case , strip_accents=_snake_case )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = BasicTokenizer(do_lower_case=_snake_case , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = BasicTokenizer()
_lowerCAmelCase = """a\n'll !!to?'d of, can't."""
_lowerCAmelCase = ["""a""", """'""", """ll""", """!""", """!""", """to""", """?""", """'""", """d""", """of""", """,""", """can""", """'""", """t""", """."""]
self.assertListEqual(tokenizer.tokenize(_snake_case ) , _snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
_lowerCAmelCase = {}
for i, token in enumerate(_snake_case ):
_lowerCAmelCase = i
_lowerCAmelCase = WordpieceTokenizer(vocab=_snake_case , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def snake_case ( self ):
"""simple docstring"""
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def snake_case ( self ):
"""simple docstring"""
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def snake_case ( self ):
"""simple docstring"""
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_snake_case ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
self.assertListEqual(
[rust_tokenizer.tokenize(_snake_case ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.tokenizer_class.from_pretrained("""bert-base-uncased""" )
_lowerCAmelCase = tokenizer.encode("""sequence builders""" , add_special_tokens=_snake_case )
_lowerCAmelCase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_snake_case )
_lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_snake_case )
_lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_snake_case , _snake_case )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def snake_case ( self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case )
_lowerCAmelCase = F'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
_lowerCAmelCase = tokenizer_r.encode_plus(
_snake_case , return_attention_mask=_snake_case , return_token_type_ids=_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case , )
_lowerCAmelCase = tokenizer_r.do_lower_case if hasattr(_snake_case , """do_lower_case""" ) else False
_lowerCAmelCase = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = ["""的""", """人""", """有"""]
_lowerCAmelCase = """""".join(_snake_case )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCAmelCase = True
_lowerCAmelCase = self.tokenizer_class.from_pretrained(_snake_case , **_snake_case )
_lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case )
_lowerCAmelCase = tokenizer_p.encode(_snake_case , add_special_tokens=_snake_case )
_lowerCAmelCase = tokenizer_r.encode(_snake_case , add_special_tokens=_snake_case )
_lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(_snake_case )
_lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(_snake_case )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_snake_case , _snake_case )
self.assertListEqual(_snake_case , _snake_case )
_lowerCAmelCase = False
_lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case )
_lowerCAmelCase = self.tokenizer_class.from_pretrained(_snake_case , **_snake_case )
_lowerCAmelCase = tokenizer_r.encode(_snake_case , add_special_tokens=_snake_case )
_lowerCAmelCase = tokenizer_p.encode(_snake_case , add_special_tokens=_snake_case )
_lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(_snake_case )
_lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(_snake_case )
# it is expected that only the first Chinese character is not preceded by "##".
_lowerCAmelCase = [
F'##{token}' if idx != 0 else token for idx, token in enumerate(_snake_case )
]
self.assertListEqual(_snake_case , _snake_case )
self.assertListEqual(_snake_case , _snake_case )
| 82
|
"""simple docstring"""
from __future__ import annotations
def a__ ( snake_case__ , snake_case__ ) -> bool:
if len(snake_case__ ) == 0:
return False
lowerCamelCase = len(snake_case__ ) // 2
if a_list[midpoint] == item:
return True
if item < a_list[midpoint]:
return binary_search(a_list[:midpoint] , snake_case__ )
else:
return binary_search(a_list[midpoint + 1 :] , snake_case__ )
if __name__ == "__main__":
lowerCAmelCase : List[Any] = input("""Enter numbers separated by comma:\n""").strip()
lowerCAmelCase : Optional[Any] = [int(item.strip()) for item in user_input.split(""",""")]
lowerCAmelCase : Optional[int] = int(input("""Enter the number to be found in the list:\n""").strip())
lowerCAmelCase : Union[str, Any] = """""" if binary_search(sequence, target) else """not """
print(F"""{target} was {not_str}found in {sequence}""")
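# The recursion above slices the list, so each level copies O(n) elements. An
# index-based variant keeps the O(log n) comparisons without the copies; the
# helper below is our own sketch, not part of the original snippet.
def binary_search_by_index(a_list, item, low=0, high=None):
    high = len(a_list) - 1 if high is None else high
    if low > high:
        return False
    midpoint = (low + high) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search_by_index(a_list, item, low, midpoint - 1)
    return binary_search_by_index(a_list, item, midpoint + 1, high)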
| 291
| 0
|
def __lowercase ( a__ ) -> int:
__SCREAMING_SNAKE_CASE = [[0 for _ in range(a__ )] for _ in range(m + 1 )]
for i in range(m + 1 ):
__SCREAMING_SNAKE_CASE = 1
for n in range(m + 1 ):
for k in range(1 , a__ ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
lowerCAmelCase__ : Optional[Any] =int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
lowerCAmelCase__ : str =int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
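# In the memo above, memo[n][k] counts partitions of n into parts of size at most
# k + 1, so the returned memo[m][m - 1] is the full partition number p(m), e.g.
# p(4) = 5: 4, 3+1, 2+2, 2+1+1, 1+1+1+1.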
| 118
|
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_ ( metaclass=UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = ['''speech''']
def __init__( self , *_A , **_A ):
'''simple docstring'''
requires_backends(self , ['speech'] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = ['''speech''']
def __init__( self , *_A , **_A ):
'''simple docstring'''
requires_backends(self , ['speech'] )
| 118
| 1
|
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase_ ( UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowerCamelCase = TapasConfig.from_json_file(UpperCamelCase__ )
# set absolute/relative position embeddings parameter
__lowerCamelCase = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
__lowerCamelCase = TapasForQuestionAnswering(config=UpperCamelCase__ )
elif task == "WTQ":
# run_task_main.py hparams
__lowerCamelCase = 4
__lowerCamelCase = True
# hparam_utils.py hparams
__lowerCamelCase = 0.66_46_94
__lowerCamelCase = 0.20_79_51
__lowerCamelCase = 0.12_11_94
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = False
__lowerCamelCase = 0.0_35_25_13
__lowerCamelCase = TapasForQuestionAnswering(config=UpperCamelCase__ )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
__lowerCamelCase = 4
__lowerCamelCase = False
# hparam_utils.py hparams
__lowerCamelCase = 36.45_19
__lowerCamelCase = 0.90_34_21
__lowerCamelCase = 2_22.0_88
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = 0.76_31_41
__lowerCamelCase = TapasForQuestionAnswering(config=UpperCamelCase__ )
elif task == "TABFACT":
__lowerCamelCase = TapasForSequenceClassification(config=UpperCamelCase__ )
elif task == "MLM":
__lowerCamelCase = TapasForMaskedLM(config=UpperCamelCase__ )
elif task == "INTERMEDIATE_PRETRAINING":
__lowerCamelCase = TapasModel(config=UpperCamelCase__ )
else:
raise ValueError(F"""Task {task} not supported.""" )
print(F"""Building PyTorch model from configuration: {config}""" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save pytorch-model (weights and configuration)
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(UpperCamelCase__ )
# Save tokenizer files
print(F"""Save tokenizer files to {pytorch_dump_path}""" )
__lowerCamelCase = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + 'vocab.txt' , model_max_length=512 )
tokenizer.save_pretrained(UpperCamelCase__ )
print('Used relative position embeddings:' , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
)
parser.add_argument(
"--reset_position_index_per_cell",
default=False,
action="store_true",
help="Whether to use relative position embeddings or not. Defaults to True.",
)
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--tapas_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained TAPAS model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__A = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
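# A typical invocation of the conversion script above (the script filename and all
# paths here are placeholders; the flags come from the argparse definition):
#
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --tapas_config_file /path/to/bert_config.json \
#       --pytorch_dump_path /path/to/output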
| 90
|
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''onnx''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['onnx'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['onnx'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['onnx'] )
| 90
| 1
|
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
def UpperCAmelCase__ ( self : Tuple , A : str=0 ):
__snake_case: List[str] = np.random.RandomState(A )
__snake_case: List[Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=A )
__snake_case: int = self.get_dummy_inputs()
__snake_case: List[Any] = pipe(**A ).images
__snake_case: Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case: Any = np.array([0.6_5072, 0.5_8492, 0.4_8219, 0.5_5521, 0.5_3180, 0.5_5939, 0.5_0697, 0.3_9800, 0.4_6455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : Dict ):
__snake_case: str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__snake_case: List[Any] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=A )
pipe.set_progress_bar_config(disable=A )
__snake_case: str = self.get_dummy_inputs()
__snake_case: Union[str, Any] = pipe(**A ).images
__snake_case: Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case: List[str] = np.array([0.6_5863, 0.5_9425, 0.4_9326, 0.5_6313, 0.5_3875, 0.5_6627, 0.5_1065, 0.3_9777, 0.4_6330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__snake_case: int = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A )
__snake_case: str = self.get_dummy_inputs()
__snake_case: Optional[int] = pipe(**A ).images
__snake_case: Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case: Optional[int] = np.array([0.5_3755, 0.6_0786, 0.4_7402, 0.4_9488, 0.5_1869, 0.4_9819, 0.4_7985, 0.3_8957, 0.4_4279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__snake_case: Dict = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A )
__snake_case: Union[str, Any] = self.get_dummy_inputs()
__snake_case: int = pipe(**A ).images
__snake_case: str = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case: Optional[int] = np.array([0.5_3755, 0.6_0786, 0.4_7402, 0.4_9488, 0.5_1869, 0.4_9819, 0.4_7985, 0.3_8957, 0.4_4279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : Any ):
__snake_case: Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__snake_case: str = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A )
__snake_case: int = self.get_dummy_inputs()
__snake_case: Optional[Any] = pipe(**A ).images
__snake_case: List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case: List[Any] = np.array([0.5_3817, 0.6_0812, 0.4_7384, 0.4_9530, 0.5_1894, 0.4_9814, 0.4_7984, 0.3_8958, 0.4_4271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__snake_case: Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A )
__snake_case: str = self.get_dummy_inputs()
__snake_case: Any = pipe(**A ).images
__snake_case: Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case: Optional[Any] = np.array([0.5_3895, 0.6_0808, 0.4_7933, 0.4_9608, 0.5_1886, 0.4_9950, 0.4_8053, 0.3_8957, 0.4_4200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Any = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=A )
__snake_case: str = self.get_dummy_inputs()
__snake_case: List[Any] = 3 * [inputs["""prompt"""]]
# forward
__snake_case: str = pipe(**A )
__snake_case: List[Any] = output.images[0, -3:, -3:, -1]
__snake_case: Optional[Any] = self.get_dummy_inputs()
__snake_case: Dict = 3 * [inputs.pop("""prompt""" )]
__snake_case: Optional[int] = pipe.tokenizer(
A , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=A , return_tensors="""np""" , )
__snake_case: Union[str, Any] = text_inputs["""input_ids"""]
        __snake_case: int = pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0]
__snake_case: Optional[Any] = prompt_embeds
# forward
__snake_case: List[str] = pipe(**A )
__snake_case: Optional[Any] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Tuple = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=A )
__snake_case: Optional[Any] = self.get_dummy_inputs()
__snake_case: List[Any] = 3 * ["""this is a negative prompt"""]
__snake_case: Any = negative_prompt
__snake_case: Dict = 3 * [inputs["""prompt"""]]
# forward
__snake_case: Union[str, Any] = pipe(**A )
__snake_case: List[Any] = output.images[0, -3:, -3:, -1]
__snake_case: Optional[Any] = self.get_dummy_inputs()
__snake_case: Union[str, Any] = 3 * [inputs.pop("""prompt""" )]
__snake_case: List[str] = []
for p in [prompt, negative_prompt]:
__snake_case: Union[str, Any] = pipe.tokenizer(
A , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=A , return_tensors="""np""" , )
__snake_case: List[Any] = text_inputs["""input_ids"""]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0] )
__snake_case , __snake_case: Optional[int] = embeds
# forward
__snake_case: Dict = pipe(**A )
__snake_case: List[Any] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase__ ( self : str ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCAmelCase__ ( self : str ):
__snake_case: Tuple = ort.SessionOptions()
__snake_case: List[str] = False
return options
def UpperCAmelCase__ ( self : int ):
# using the PNDM scheduler by default
__snake_case: int = OnnxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=A , feature_extractor=A , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=A )
__snake_case: int = """A painting of a squirrel eating a burger"""
np.random.seed(0 )
__snake_case: Optional[Any] = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="""np""" )
__snake_case: Optional[int] = output.images
__snake_case: List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__snake_case: Optional[Any] = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: List[Any] = DDIMScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__snake_case: Dict = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=A , safety_checker=A , feature_extractor=A , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=A )
__snake_case: str = """open neural network exchange"""
__snake_case: List[Any] = np.random.RandomState(0 )
__snake_case: Union[str, Any] = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=A , output_type="""np""" )
__snake_case: Tuple = output.images
__snake_case: str = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__snake_case: Tuple = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: int = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__snake_case: List[str] = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=A , safety_checker=A , feature_extractor=A , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=A )
__snake_case: str = """open neural network exchange"""
__snake_case: Optional[int] = np.random.RandomState(0 )
__snake_case: str = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=A , output_type="""np""" )
__snake_case: List[str] = output.images
__snake_case: List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__snake_case: List[str] = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Any = 0
def test_callback_fn(A : int , A : int , A : np.ndarray ) -> None:
__snake_case: Dict = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
__snake_case: str = latents[0, -3:, -3:, -1]
__snake_case: Optional[Any] = np.array(
[-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
__snake_case: Optional[int] = latents[0, -3:, -3:, -1]
__snake_case: List[str] = np.array(
[-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
__snake_case: Dict = False
__snake_case: Any = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=A , feature_extractor=A , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A )
__snake_case: List[str] = """Andromeda galaxy in a bottle"""
__snake_case: Union[str, Any] = np.random.RandomState(0 )
pipe(
prompt=A , num_inference_steps=5 , guidance_scale=7.5 , generator=A , callback=A , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=A , feature_extractor=A , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(A , A )
assert pipe.safety_checker is None
__snake_case: List[str] = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(A )
__snake_case: List[str] = OnnxStableDiffusionPipeline.from_pretrained(A )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__snake_case: int = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
| 293
|
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = 42
class __snake_case ( __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = True
@register_to_config
def __init__( self : Union[str, Any] , A : int = 3 , A : int = 3 , A : Tuple[str] = ("DownEncoderBlock2D",) , A : Tuple[str] = ("UpDecoderBlock2D",) , A : Tuple[int] = (64,) , A : int = 1 , A : str = "silu" , A : int = 4 , A : int = 32 , A : int = 32 , A : float = 0.1_8215 , ):
super().__init__()
# pass init params to Encoder
__snake_case: Any = Encoder(
in_channels=A , out_channels=A , down_block_types=A , block_out_channels=A , layers_per_block=A , act_fn=A , norm_num_groups=A , double_z=A , )
# pass init params to Decoder
__snake_case: int = Decoder(
in_channels=A , out_channels=A , up_block_types=A , block_out_channels=A , layers_per_block=A , norm_num_groups=A , act_fn=A , )
        __snake_case: Dict = nn.Conv2d(2 * latent_channels , 2 * latent_channels , 1 )
        __snake_case: int = nn.Conv2d(A , A , 1 )
__snake_case: List[str] = False
__snake_case: Optional[int] = False
# only relevant if vae tiling is enabled
__snake_case: Any = self.config.sample_size
__snake_case: int = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
__snake_case: Union[str, Any] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
__snake_case: Optional[int] = 0.25
def UpperCAmelCase__ ( self : int , A : List[str] , A : Optional[Any]=False ):
if isinstance(A , (Encoder, Decoder) ):
__snake_case: str = value
def UpperCAmelCase__ ( self : str , A : bool = True ):
__snake_case: Union[str, Any] = use_tiling
def UpperCAmelCase__ ( self : Optional[int] ):
self.enable_tiling(A )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: List[str] = True
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: List[str] = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Any = {}
def fn_recursive_add_processors(A : str , A : torch.nn.Module , A : Dict[str, AttentionProcessor] ):
if hasattr(A , """set_processor""" ):
__snake_case: List[Any] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''' , A , A )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(A , A , A )
return processors
def UpperCAmelCase__ ( self : Optional[int] , A : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
__snake_case: Any = len(self.attn_processors.keys() )
if isinstance(A , A ) and len(A ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(A )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(A : str , A : torch.nn.Module , A : Optional[Any] ):
if hasattr(A , """set_processor""" ):
if not isinstance(A , A ):
module.set_processor(A )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''' , A , A )
for name, module in self.named_children():
fn_recursive_attn_processor(A , A , A )
def UpperCAmelCase__ ( self : List[str] ):
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def UpperCAmelCase__ ( self : Optional[Any] , A : torch.FloatTensor , A : bool = True ):
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(A , return_dict=A )
if self.use_slicing and x.shape[0] > 1:
__snake_case: List[Any] = [self.encoder(A ) for x_slice in x.split(1 )]
__snake_case: Optional[Any] = torch.cat(A )
else:
__snake_case: str = self.encoder(A )
__snake_case: Any = self.quant_conv(A )
__snake_case: Tuple = DiagonalGaussianDistribution(A )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=A )
def UpperCAmelCase__ ( self : Tuple , A : torch.FloatTensor , A : bool = True ):
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(A , return_dict=A )
__snake_case: Optional[int] = self.post_quant_conv(A )
__snake_case: Union[str, Any] = self.decoder(A )
if not return_dict:
return (dec,)
return DecoderOutput(sample=A )
@apply_forward_hook
def UpperCAmelCase__ ( self : Tuple , A : torch.FloatTensor , A : bool = True ):
if self.use_slicing and z.shape[0] > 1:
__snake_case: Union[str, Any] = [self._decode(A ).sample for z_slice in z.split(1 )]
__snake_case: List[str] = torch.cat(A )
else:
__snake_case: int = self._decode(A ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=A )
def UpperCAmelCase__ ( self : Any , A : Tuple , A : int , A : List[Any] ):
__snake_case: int = min(a.shape[2] , b.shape[2] , A )
for y in range(A ):
__snake_case: Dict = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def UpperCAmelCase__ ( self : Union[str, Any] , A : Optional[Any] , A : List[str] , A : List[str] ):
__snake_case: Dict = min(a.shape[3] , b.shape[3] , A )
for x in range(A ):
__snake_case: Tuple = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def UpperCAmelCase__ ( self : int , A : torch.FloatTensor , A : bool = True ):
__snake_case: List[str] = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
__snake_case: Dict = int(self.tile_latent_min_size * self.tile_overlap_factor )
__snake_case: Dict = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
__snake_case: Optional[int] = []
for i in range(0 , x.shape[2] , A ):
__snake_case: Optional[int] = []
for j in range(0 , x.shape[3] , A ):
__snake_case: int = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
__snake_case: Tuple = self.encoder(A )
__snake_case: Dict = self.quant_conv(A )
row.append(A )
rows.append(A )
__snake_case: Tuple = []
for i, row in enumerate(A ):
__snake_case: str = []
for j, tile in enumerate(A ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__snake_case: Optional[Any] = self.blend_v(rows[i - 1][j] , A , A )
if j > 0:
__snake_case: Tuple = self.blend_h(row[j - 1] , A , A )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(A , dim=3 ) )
__snake_case: Tuple = torch.cat(A , dim=2 )
__snake_case: Optional[int] = DiagonalGaussianDistribution(A )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=A )
def UpperCAmelCase__ ( self : Union[str, Any] , A : torch.FloatTensor , A : bool = True ):
__snake_case: Optional[Any] = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
__snake_case: str = int(self.tile_sample_min_size * self.tile_overlap_factor )
__snake_case: int = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
__snake_case: List[Any] = []
for i in range(0 , z.shape[2] , A ):
__snake_case: Optional[Any] = []
for j in range(0 , z.shape[3] , A ):
__snake_case: Dict = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
__snake_case: Any = self.post_quant_conv(A )
__snake_case: Optional[Any] = self.decoder(A )
row.append(A )
rows.append(A )
__snake_case: Optional[Any] = []
for i, row in enumerate(A ):
__snake_case: Optional[Any] = []
for j, tile in enumerate(A ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__snake_case: Tuple = self.blend_v(rows[i - 1][j] , A , A )
if j > 0:
__snake_case: List[str] = self.blend_h(row[j - 1] , A , A )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(A , dim=3 ) )
__snake_case: Dict = torch.cat(A , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=A )
def UpperCAmelCase__ ( self : List[Any] , A : torch.FloatTensor , A : bool = False , A : bool = True , A : Optional[torch.Generator] = None , ):
__snake_case: Optional[Any] = sample
__snake_case: Union[str, Any] = self.encode(A ).latent_dist
if sample_posterior:
__snake_case: Optional[Any] = posterior.sample(generator=A )
else:
__snake_case: Dict = posterior.mode()
__snake_case: Any = self.decode(A ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=A )
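# The blend_v / blend_h helpers above remove seams between neighbouring tiles by
# linearly cross-fading `blend_extent` rows or columns: at offset y the blended
# value is a * (1 - y / blend_extent) + b * (y / blend_extent), so the result
# moves smoothly from tile `a` to tile `b` across the overlap.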
| 293
| 1
|
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode, file_format=self._file_format
        )
        return self.builder.as_dataset(split=self.split)
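
# Hedged usage sketch (added for illustration; assumes a local Spark session
# and that this reader behaves like datasets' Dataset.from_spark path).
if __name__ == "__main__":
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.master("local[2]").getOrCreate()
    df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
    dataset = SparkDatasetReader(df, streaming=False).read()
    print(dataset)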
| 155
|
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2


@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
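
# Hedged usage sketch (added for illustration): CONNECTION_FAILS patches
# requests so that any network call inside the block raises immediately.
if __name__ == "__main__":
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        try:
            requests.get("https://huggingface.co", timeout=1.0)
        except requests.ConnectionError as err:
            print(f"Blocked as expected: {err}")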
| 349
| 0
|
"""simple docstring"""
def euclidean_distance_sqr(pointa, pointb):
    """Squared euclidean distance between two 2D points."""
    return (pointa[0] - pointb[0]) ** 2 + (pointa[1] - pointb[1]) ** 2


def column_based_sort(array, column=0):
    """Sort a list of points by the given coordinate."""
    return sorted(array, key=lambda point: point[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute force: minimal squared distance among the first `points_counts` points."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Closest pair inside the strip; at most 6 neighbours need checking per point."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    """Divide and conquer on squared distances."""
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    """Distance (not squared) of the closest pair of points."""
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
| 234
|
"""simple docstring"""
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's law: pass exactly one of the three quantities as 0 and get it back computed from the other two."""
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
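    # Hedged usage example (added for illustration): with V = 10 V and
    # R = 5 ohm, the missing current is I = V / R = 2 A.
    print(ohms_law(voltage=10, current=0, resistance=5))  # {'current': 2.0}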
| 234
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
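
# Hedged usage sketch (added for illustration; assumes transformers with torch
# installed). Thanks to the lazy module above, the heavy modeling code is only
# imported when the symbol is first accessed.
if __name__ == "__main__":
    from transformers import BertConfig, EncoderDecoderConfig, EncoderDecoderModel

    config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
    model = EncoderDecoderModel(config=config)
    print(type(model).__name__)  # EncoderDecoderModel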
| 216
|
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
    "E": 12.70,
    "T": 9.06,
    "A": 8.17,
    "O": 7.51,
    "I": 6.97,
    "N": 6.75,
    "S": 6.33,
    "H": 6.09,
    "R": 5.99,
    "D": 4.25,
    "L": 4.03,
    "C": 2.78,
    "U": 2.76,
    "M": 2.41,
    "W": 2.36,
    "F": 2.23,
    "G": 2.02,
    "Y": 1.97,
    "P": 1.93,
    "B": 1.29,
    "V": 0.98,
    "K": 0.77,
    "J": 0.15,
    "X": 0.15,
    "Q": 0.10,
    "Z": 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def get_letter_count(message: str):
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple):
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    sorted_freq_to_letter_str: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(sorted_freq_to_letter_str)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score


if __name__ == "__main__":
    import doctest

    doctest.testmod()
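    # Hedged usage example (added for illustration): ordinary English prose
    # should rank common letters early and score high on the 0-12 match scale.
    sample = "The quick brown fox jumps over the lazy dog. " * 10
    print(get_frequency_order(sample))
    print(english_freq_match_score(sample))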
| 216
| 1
|
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 303
|
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 303
| 1
|
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Speed of sound in a fluid: c = sqrt(K / rho) with bulk modulus K (Pa) and density rho (kg/m^3)."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
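    # Hedged usage example (added for illustration): for water near 20 °C with
    # bulk modulus K ≈ 2.1e9 Pa and density ≈ 998 kg/m³ this gives about
    # 1.45e3 m/s, in the right range for the tabulated ~1480 m/s.
    print(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.1e9))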
| 118
|
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens

        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
| 118
| 1
|
'''simple docstring'''
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """Greedy fractional knapsack: values `vl`, weights `wt`, capacity `w`, item count `n`."""
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
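    # Hedged usage example (added for illustration): the classic instance with
    # values [60, 100, 120], weights [10, 20, 30] and capacity 50 — take the
    # first two items whole plus 2/3 of the third for a total of 240.0.
    print(frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3))  # 240.0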
| 369
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
# fmt: on
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts, src_lang="eng_Latn", tgt_texts=None, tgt_lang="fra_Latn", **kwargs):
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
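
# Hedged usage sketch (added for illustration; assumes the public NLLB
# checkpoint and a transformers install with torch). The src_lang/tgt_lang
# plumbing above is what places the language-code tokens around the text.
if __name__ == "__main__":
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
    model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")
    inputs = tokenizer("The weather is nice today.", return_tensors="pt")
    generated = model.generate(**inputs, forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn"))
    print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])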
| 246
| 0
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :Optional[int] = MBartaaTokenizer
__magic_name__ :List[Any] = MBartaaTokenizerFast
__magic_name__ :str = True
__magic_name__ :int = True
def snake_case ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ :str = MBartaaTokenizer(__UpperCAmelCase , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=__UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = '<s>'
lowerCAmelCase__ :Optional[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(__UpperCAmelCase ) , 1_0_5_4 )
def snake_case ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_5_4 )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = MBartaaTokenizer(__UpperCAmelCase , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=__UpperCAmelCase )
lowerCAmelCase__ :Any = tokenizer.tokenize('This is a test' )
self.assertListEqual(__UpperCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowerCAmelCase__ :List[str] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__UpperCAmelCase , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
lowerCAmelCase__ :List[Any] = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
lowerCAmelCase__ :str = tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Any = {'input_ids': [[2_5_0_0_0_4, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [2_5_0_0_0_4, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_0_0_0_4, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , )
def snake_case ( self ):
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCAmelCase__ :Optional[Any] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase__ :Union[str, Any] = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = self.tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
lowerCAmelCase__ :List[str] = tempfile.mkdtemp()
lowerCAmelCase__ :int = tokenizer_r.save_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = tokenizer_p.save_pretrained(__UpperCAmelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
lowerCAmelCase__ :List[Any] = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(__UpperCAmelCase , __UpperCAmelCase )
# Checks everything loads correctly in the same way
lowerCAmelCase__ :Any = tokenizer_r.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = tokenizer_p.from_pretrained(__UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__UpperCAmelCase , __UpperCAmelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__UpperCAmelCase )
# Save tokenizer rust, legacy_format=True
lowerCAmelCase__ :Any = tempfile.mkdtemp()
lowerCAmelCase__ :str = tokenizer_r.save_pretrained(__UpperCAmelCase , legacy_format=__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = tokenizer_p.save_pretrained(__UpperCAmelCase )
# Checks it save with the same files
self.assertSequenceEqual(__UpperCAmelCase , __UpperCAmelCase )
# Checks everything loads correctly in the same way
lowerCAmelCase__ :int = tokenizer_r.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :int = tokenizer_p.from_pretrained(__UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__UpperCAmelCase , __UpperCAmelCase ) )
shutil.rmtree(__UpperCAmelCase )
# Save tokenizer rust, legacy_format=False
lowerCAmelCase__ :str = tempfile.mkdtemp()
lowerCAmelCase__ :str = tokenizer_r.save_pretrained(__UpperCAmelCase , legacy_format=__UpperCAmelCase )
lowerCAmelCase__ :List[str] = tokenizer_p.save_pretrained(__UpperCAmelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCAmelCase__ :Optional[int] = tokenizer_r.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = tokenizer_p.from_pretrained(__UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__UpperCAmelCase , __UpperCAmelCase ) )
shutil.rmtree(__UpperCAmelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBart50Tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250038)

    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250053, 250001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]

    @require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[250004, 62, 3034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
| 293
|
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
    import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"

        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()

        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()

        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)

            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"

        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
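
# Hedged usage sketch (added for illustration; assumes optimum is installed):
# the round trip the tests above exercise, on any supported model.
if __name__ == "__main__":
    from transformers import AutoModelForSeq2SeqLM

    model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
    model = model.to_bettertransformer()  # swap to fused attention kernels
    model = model.reverse_bettertransformer()  # restore the canonical modules
    model.save_pretrained("./tiny-t5-roundtrip")  # only valid after reversing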
| 293
| 1
|
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        FocalNetBackbone,
        FocalNetForImageClassification,
        FocalNetForMaskedImageModeling,
        FocalNetModel,
    )
    from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self : int , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple=13 , UpperCAmelCase__ : Dict=32 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : int=3 , UpperCAmelCase__ : Union[str, Any]=16 , UpperCAmelCase__ : List[str]=[32, 64, 128] , UpperCAmelCase__ : Tuple=[1, 2, 1] , UpperCAmelCase__ : Optional[int]=[2, 2, 4] , UpperCAmelCase__ : Dict=2 , UpperCAmelCase__ : Dict=2.0 , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : List[str]=0.0 , UpperCAmelCase__ : int=0.0 , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : Tuple="gelu" , UpperCAmelCase__ : List[Any]=False , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : List[Any]=1e-5 , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : int=True , UpperCAmelCase__ : int=10 , UpperCAmelCase__ : Optional[int]=8 , UpperCAmelCase__ : Tuple=["stage1", "stage2"] , UpperCAmelCase__ : str=[1, 2] , ) ->str:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = image_size
A__ = patch_size
A__ = num_channels
A__ = embed_dim
A__ = hidden_sizes
A__ = depths
A__ = num_heads
A__ = window_size
A__ = mlp_ratio
A__ = qkv_bias
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = drop_path_rate
A__ = hidden_act
A__ = use_absolute_embeddings
A__ = patch_norm
A__ = layer_norm_eps
A__ = initializer_range
A__ = is_training
A__ = scope
A__ = use_labels
A__ = type_sequence_label_size
A__ = encoder_stride
A__ = out_features
A__ = out_indices
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict:
'''simple docstring'''
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
A__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self : Any) ->Any:
'''simple docstring'''
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int) ->List[Any]:
'''simple docstring'''
A__ = FocalNetModel(config=UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
A__ = model(UpperCAmelCase__)
A__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
A__ = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str]) ->int:
'''simple docstring'''
A__ = FocalNetBackbone(config=UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
A__ = model(UpperCAmelCase__)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.image_size, 8, 8])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1])
# verify backbone works with out_features=None
A__ = None
A__ = FocalNetBackbone(config=UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
A__ = model(UpperCAmelCase__)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.image_size * 2, 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels) , 1)
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
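# Editorial note: with `out_features=None` the backbone falls back to its last stage
# only, which is why the block above expects exactly one feature map whose channel
# count equals `config.hidden_sizes[-1]`.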
def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any]) ->Optional[Any]:
'''simple docstring'''
A__ = FocalNetForMaskedImageModeling(config=UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
A__ = model(UpperCAmelCase__)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
A__ = 1
A__ = FocalNetForMaskedImageModeling(UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
A__ = model(UpperCAmelCase__)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any) ->Union[str, Any]:
'''simple docstring'''
A__ = self.type_sequence_label_size
A__ = FocalNetForImageClassification(UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
A__ = 1
A__ = FocalNetForImageClassification(UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
A__ = model(UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str:
'''simple docstring'''
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ = (
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->str:
'''simple docstring'''
A__ = FocalNetModelTester(self)
A__ = ConfigTester(self , config_class=UpperCAmelCase__ , embed_dim=37 , has_text_modality=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[int]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE ( self : Any) ->str:
'''simple docstring'''
return
def SCREAMING_SNAKE_CASE ( self : Dict) ->Any:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Tuple) ->str:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Dict) ->int:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[int]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__)
@unittest.skip(reason='''FocalNet does not use inputs_embeds''')
def SCREAMING_SNAKE_CASE ( self : Tuple) ->str:
'''simple docstring'''
pass
@unittest.skip(reason='''FocalNet does not use feedforward chunking''')
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Tuple:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self : int) ->List[Any]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
A__ = model_class(UpperCAmelCase__)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
A__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase__ , nn.Linear))
def SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
A__ = model_class(UpperCAmelCase__)
A__ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str) ->int:
'''simple docstring'''
A__ = model_class(UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__))
A__ = outputs.hidden_states
A__ = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths) + 1)
self.assertEqual(len(UpperCAmelCase__) , UpperCAmelCase__)
# FocalNet has a different seq_length
A__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
A__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
A__ = outputs.reshaped_hidden_states
self.assertEqual(len(UpperCAmelCase__) , UpperCAmelCase__)
A__ , A__ , A__ , A__ = reshaped_hidden_states[0].shape
A__ = (
reshaped_hidden_states[0].view(UpperCAmelCase__ , UpperCAmelCase__ , height * width).permute(0 , 2 , 1)
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
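# Editorial note: `reshaped_hidden_states` arrive as (batch, channels, height, width);
# the view/permute above flattens them to (batch, seq_len, channels) so their trailing
# dimensions can be compared against the regular `hidden_states` shapes.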
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
A__ = True
self.check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
self.check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->str:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = 3
A__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
A__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
A__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
A__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
A__ = True
self.check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , (padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
self.check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , (padded_height, padded_width))
@slow
def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[Any]:
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = FocalNetModel.from_pretrained(UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = _config_zero_init(UpperCAmelCase__)
for model_class in self.all_model_classes:
A__ = model_class(config=UpperCAmelCase__)
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE ( self : str) ->str:
'''simple docstring'''
return AutoImageProcessor.from_pretrained('''microsoft/focalnet-tiny''') if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Any:
'''simple docstring'''
A__ = FocalNetForImageClassification.from_pretrained('''microsoft/focalnet-tiny''').to(UpperCAmelCase__)
A__ = self.default_image_processor
A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
A__ = image_processor(images=UpperCAmelCase__ , return_tensors='''pt''').to(UpperCAmelCase__)
# forward pass
with torch.no_grad():
A__ = model(**UpperCAmelCase__)
# verify the logits
A__ = torch.Size((1, 1_000))
self.assertEqual(outputs.logits.shape , UpperCAmelCase__)
A__ = torch.tensor([0.2166, -0.4368, 0.2191]).to(UpperCAmelCase__)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4))
self.assertEqual(outputs.logits.argmax(dim=-1).item() , 281)
@require_torch
class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = (FocalNetBackbone,) if is_torch_available() else ()
UpperCAmelCase__ = FocalNetConfig
UpperCAmelCase__ = False
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]:
'''simple docstring'''
A__ = FocalNetModelTester(self)
| 231
|
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any=14 , UpperCAmelCase__ : Any=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Tuple=False , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Tuple=99 , UpperCAmelCase__ : Optional[Any]=32 , UpperCAmelCase__ : Tuple=4 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : int=4 , UpperCAmelCase__ : Optional[Any]=37 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : Optional[Any]=512 , UpperCAmelCase__ : Union[str, Any]=0.02 , ) ->Any:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = rotary_dim
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = initializer_range
A__ = None
A__ = vocab_size - 1
A__ = vocab_size - 1
A__ = vocab_size - 1
def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
'''simple docstring'''
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length])
A__ = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=UpperCAmelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[int]:
'''simple docstring'''
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any]) ->Optional[int]:
'''simple docstring'''
A__ = 20
A__ = model_class_name(UpperCAmelCase__)
A__ = model.init_cache(input_ids.shape[0] , UpperCAmelCase__)
A__ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='''i4''')
A__ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1))
A__ = model(
input_ids[:, :-1] , attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , position_ids=UpperCAmelCase__ , )
A__ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''')
A__ = model(
input_ids[:, -1:] , attention_mask=UpperCAmelCase__ , past_key_values=outputs_cache.past_key_values , position_ids=UpperCAmelCase__ , )
A__ = model(UpperCAmelCase__)
A__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""")
def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int) ->Any:
'''simple docstring'''
A__ = 20
A__ = model_class_name(UpperCAmelCase__)
A__ = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))] , axis=-1 , )
A__ = model.init_cache(input_ids.shape[0] , UpperCAmelCase__)
A__ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1))
A__ = model(
input_ids[:, :-1] , attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , position_ids=UpperCAmelCase__ , )
A__ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''')
A__ = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=UpperCAmelCase__ , position_ids=UpperCAmelCase__ , )
A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__)
A__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""")
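# Editorial note: both cache checks above follow the same recipe for validating
# incremental decoding: run all tokens but the last with a fresh cache, feed the final
# token alone against `outputs_cache.past_key_values`, and require the resulting
# logits to match a full uncached forward pass to within 1e-3.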
@require_flax
class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
UpperCAmelCase__ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]:
'''simple docstring'''
A__ = FlaxGPTJModelTester(self)
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Any:
'''simple docstring'''
for model_class_name in self.all_model_classes:
A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
'''simple docstring'''
for model_class_name in self.all_model_classes:
A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
@tooslow
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict:
'''simple docstring'''
A__ = GPTaTokenizer.from_pretrained('''gpt2''' , pad_token='''<|endoftext|>''' , padding_side='''left''')
A__ = tokenizer(['''Hello this is a long string''', '''Hey'''] , return_tensors='''np''' , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__)
A__ = FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''')
A__ = False
A__ = model.config.eos_token_id
A__ = jax.jit(model.generate)
A__ = jit_generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , pad_token_id=tokenizer.pad_token_id).sequences
A__ = tokenizer.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__)
A__ = [
'''Hello this is a long string of text.\n\nI\'m trying to get the text of the''',
'''Hey, I\'m a little late to the party. I\'m going to''',
]
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__)
@is_pt_flax_cross_test
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# prepare inputs
A__ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__)
A__ = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
A__ = model_class.__name__[4:] # Skip the "Flax" at the beginning
A__ = getattr(UpperCAmelCase__ , UpperCAmelCase__)
A__ , A__ = pt_inputs['''input_ids'''].shape
A__ = np.random.randint(0 , seq_length - 1 , size=(batch_size,))
for batch_idx, start_index in enumerate(UpperCAmelCase__):
A__ = 0
A__ = 1
A__ = 0
A__ = 1
A__ = pt_model_class(UpperCAmelCase__).eval()
A__ = model_class(UpperCAmelCase__ , dtype=jnp.floataa)
A__ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCAmelCase__)
A__ = fx_state
with torch.no_grad():
A__ = pt_model(**UpperCAmelCase__).to_tuple()
A__ = fx_model(**UpperCAmelCase__).to_tuple()
self.assertEqual(len(UpperCAmelCase__) , len(UpperCAmelCase__) , '''Output lengths differ between Flax and PyTorch''')
for fx_output, pt_output in zip(UpperCAmelCase__ , UpperCAmelCase__):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2)
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(UpperCAmelCase__)
A__ = model_class.from_pretrained(UpperCAmelCase__ , from_pt=UpperCAmelCase__)
A__ = fx_model_loaded(**UpperCAmelCase__).to_tuple()
self.assertEqual(
len(UpperCAmelCase__) , len(UpperCAmelCase__) , '''Output lengths differ between Flax and PyTorch''')
for fx_output_loaded, pt_output in zip(UpperCAmelCase__ , UpperCAmelCase__):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2)
@is_pt_flax_cross_test
def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[int]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# prepare inputs
A__ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__)
A__ = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
A__ = model_class.__name__[4:] # Skip the "Flax" at the beginning
A__ = getattr(UpperCAmelCase__ , UpperCAmelCase__)
A__ = pt_model_class(UpperCAmelCase__).eval()
A__ = model_class(UpperCAmelCase__ , dtype=jnp.floataa)
A__ = load_flax_weights_in_pytorch_model(UpperCAmelCase__ , fx_model.params)
A__ , A__ = pt_inputs['''input_ids'''].shape
A__ = np.random.randint(0 , seq_length - 1 , size=(batch_size,))
for batch_idx, start_index in enumerate(UpperCAmelCase__):
A__ = 0
A__ = 1
A__ = 0
A__ = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
A__ = pt_model(**UpperCAmelCase__).to_tuple()
A__ = fx_model(**UpperCAmelCase__).to_tuple()
self.assertEqual(len(UpperCAmelCase__) , len(UpperCAmelCase__) , '''Output lengths differ between Flax and PyTorch''')
for fx_output, pt_output in zip(UpperCAmelCase__ , UpperCAmelCase__):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2)
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(UpperCAmelCase__)
A__ = pt_model_class.from_pretrained(UpperCAmelCase__ , from_flax=UpperCAmelCase__)
with torch.no_grad():
A__ = pt_model_loaded(**UpperCAmelCase__).to_tuple()
self.assertEqual(
len(UpperCAmelCase__) , len(UpperCAmelCase__) , '''Output lengths differ between Flax and PyTorch''')
for fx_output, pt_output in zip(UpperCAmelCase__ , UpperCAmelCase__):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2)
@tooslow
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
A__ = model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''')
A__ = model(np.ones((1, 1)))
self.assertIsNotNone(UpperCAmelCase__)
| 231
| 1
|
'''simple docstring'''
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Any = checkpoints.load_t5x_checkpoint(__lowerCAmelCase )
_UpperCAmelCase : List[str] = flatten_dict(__lowerCAmelCase )
return flax_params
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : int = {}
_UpperCAmelCase : Tuple = {
"token_embedder": "embeddings",
"encoder_norm": "layernorm",
"kernel": "weight",
".out": ".output",
"scale": "weight",
"embedders_0.pos_embedding": "row_embedder.weight",
"embedders_1.pos_embedding": "column_embedder.weight",
}
_UpperCAmelCase : Any = {
"query": "attention.query",
"key": "attention.key",
"value": "attention.value",
"output.dense": "output",
"encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
"pre_self_attention_layer_norm": "self_attention.layer_norm",
"pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
"mlp.": "mlp.DenseReluDense.",
"pre_mlp_layer_norm": "mlp.layer_norm",
"self_attention.o": "self_attention.attention.o",
"decoder.embeddings.embedding": "decoder.embed_tokens.weight",
"decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
"decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
"decoder.logits_dense.weight": "decoder.lm_head.weight",
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
_UpperCAmelCase : Any = ".".join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
_UpperCAmelCase : int = new_key.replace(__lowerCAmelCase , __lowerCAmelCase )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
_UpperCAmelCase : Dict = new_key.replace(__lowerCAmelCase , __lowerCAmelCase )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
_UpperCAmelCase : Union[str, Any] = re.sub(R"layers_(\d+)" , R"layer.\1" , __lowerCAmelCase )
_UpperCAmelCase : int = new_key.replace("encoder" , "encoder.encoder" )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
_UpperCAmelCase : str = re.sub(R"layers_(\d+)" , R"layer.\1" , __lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = flax_dict[key]
_UpperCAmelCase : List[Any] = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
_UpperCAmelCase : Optional[Any] = torch.from_numpy(converted_dict[key].T )
else:
_UpperCAmelCase : int = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
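# Editorial note on the transposes above: Flax/T5X stores dense kernels as
# (in_features, out_features) while PyTorch's nn.Linear expects
# (out_features, in_features), so every non-embedding weight is transposed with `.T`
# before `torch.from_numpy`; embedding tables keep their (vocab, hidden) layout and
# are copied as-is.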
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=False ):
_UpperCAmelCase : List[Any] = get_flax_param(__lowerCAmelCase )
if not use_large:
_UpperCAmelCase : Optional[Any] = PixaStructVisionConfig()
_UpperCAmelCase : str = PixaStructTextConfig()
else:
_UpperCAmelCase : Dict = PixaStructVisionConfig(
hidden_size=1_536 , d_ff=3_968 , num_attention_heads=24 , num_hidden_layers=18 )
_UpperCAmelCase : Tuple = PixaStructTextConfig(hidden_size=1_536 , d_ff=3_968 , num_heads=24 , num_layers=18 )
_UpperCAmelCase : Dict = PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=__lowerCAmelCase )
_UpperCAmelCase : int = PixaStructForConditionalGeneration(__lowerCAmelCase )
_UpperCAmelCase : Optional[Any] = rename_and_convert_flax_params(__lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
_UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer" )
_UpperCAmelCase : Any = PixaStructImageProcessor()
_UpperCAmelCase : Optional[Any] = PixaStructProcessor(image_processor=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
if use_large:
_UpperCAmelCase : Tuple = 4_096
_UpperCAmelCase : Optional[Any] = True
# mkdir if needed
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
print("Model saved in {}".format(__lowerCAmelCase ) )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--use_large', action='store_true', help='Use large model.')
    parser.add_argument('--is_vqa', action='store_true', help='Convert a VQA checkpoint.')
lowerCamelCase__ = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 234
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
lowerCamelCase__ = None
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
lowerCamelCase__ = {
'vocab_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
},
'tokenizer_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
},
}
lowerCamelCase__ = {
'google/fnet-base': 512,
'google/fnet-large': 512,
}
lowerCamelCase__ = '▁'
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Dict = VOCAB_FILES_NAMES
lowerCAmelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase : Optional[int] = ["input_ids", "token_type_ids"]
lowerCAmelCase : Optional[Any] = FNetTokenizer
def __init__( self : Dict , lowerCamelCase__ : int=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Any=False , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : Dict=True , lowerCamelCase__ : str="<unk>" , lowerCamelCase__ : List[str]="[SEP]" , lowerCamelCase__ : Union[str, Any]="<pad>" , lowerCamelCase__ : Optional[Any]="[CLS]" , lowerCamelCase__ : Any="[MASK]" , **lowerCamelCase__ : Any , ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = (
AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ , normalized=lowerCamelCase__ )
if isinstance(lowerCamelCase__ , lowerCamelCase__ )
else mask_token
)
super().__init__(
lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , do_lower_case=lowerCamelCase__ , remove_space=lowerCamelCase__ , keep_accents=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , **lowerCamelCase__ , )
_UpperCAmelCase : Optional[Any] = do_lower_case
_UpperCAmelCase : Tuple = remove_space
_UpperCAmelCase : List[Any] = keep_accents
_UpperCAmelCase : Tuple = vocab_file
_UpperCAmelCase : str = bool(self.vocab_file )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ) ->List[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = [self.sep_token_id]
_UpperCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ) ->List[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = [self.sep_token_id]
_UpperCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ) ->Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCamelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_UpperCAmelCase : Union[str, Any] = os.path.join(
lowerCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ):
copyfile(self.vocab_file , lowerCamelCase__ )
return (out_vocab_file,)
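# Editorial note: the two helpers above implement the standard ALBERT-style layout
# that FNet inherits (illustrative summary, not part of the original file):
#
#     single sequence: [CLS] A [SEP]            token_type_ids: 0 0 ... 0
#     sentence pair:   [CLS] A [SEP] B [SEP]    token_type_ids: 0 ... 0 1 ... 1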
| 234
| 1
|
from __future__ import annotations
lowerCamelCase_ = list[list[int]]
# assigning initial values to the grid
lowerCamelCase_ = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
lowerCamelCase_ = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def __magic_name__ ( __a : Matrix , __a : int , __a : int , __a : int ):
'''simple docstring'''
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def __magic_name__ ( __a : Matrix ):
'''simple docstring'''
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def __magic_name__ ( __a : Matrix ):
'''simple docstring'''
if location := find_empty_location(__a ):
UpperCamelCase__ , UpperCamelCase__ = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(__a , __a , __a , __a ):
UpperCamelCase__ = digit
if sudoku(__a ) is not None:
return grid
UpperCamelCase__ = 0
return None
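# Editorial note: `sudoku` is plain depth-first backtracking: locate the first empty
# cell, try each digit 1-9 that passes `is_safe`, recurse, and reset the cell to 0
# before falling back. Worst-case cost is exponential in the number of empty cells,
# which is perfectly acceptable for a 9x9 grid.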
def __magic_name__ ( __a : Matrix ):
'''simple docstring'''
for row in grid:
for cell in row:
print(__a , end=""" """ )
print()
if __name__ == "__main__":
    # iterate over the two example grids above: one solvable, one without a solution
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
lowerCamelCase_ = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
| 178
|
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowerCamelCase_ = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase_ = importlib.util.spec_from_file_location(
'''transformers''',
os.path.join(PATH_TO_TRANSFORMERS, '''__init__.py'''),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
lowerCamelCase_ = spec.loader.load_module()
lowerCamelCase_ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
lowerCamelCase_ = re.compile(r'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
lowerCamelCase_ = {
'''CLIPConfigMixin''',
'''DecisionTransformerConfigMixin''',
'''EncoderDecoderConfigMixin''',
'''RagConfigMixin''',
'''SpeechEncoderDecoderConfigMixin''',
'''VisionEncoderDecoderConfigMixin''',
'''VisionTextDualEncoderConfigMixin''',
}
def __magic_name__ ( ):
'''simple docstring'''
UpperCamelCase__ = []
for config_class in list(CONFIG_MAPPING.values() ):
UpperCamelCase__ = False
# source code of `config_class`
UpperCamelCase__ = inspect.getsource(__a )
UpperCamelCase__ = _re_checkpoint.findall(__a )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
UpperCamelCase__ , UpperCamelCase__ = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
UpperCamelCase__ = f"https://huggingface.co/{ckpt_name}"
if ckpt_link == ckpt_link_from_name:
UpperCamelCase__ = True
break
UpperCamelCase__ = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(__a )
if len(__a ) > 0:
UpperCamelCase__ = """\n""".join(sorted(__a ) )
raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
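# Editorial example of the checkpoint regex used above (the sample docstring line is
# made up for illustration):
#
#     sample = "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
#     _re_checkpoint.findall(sample)
#     # -> [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]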
| 178
| 1
|
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = '''pixel_values'''
lowerCAmelCase_ = False
lowerCAmelCase_ = TimmBackboneConfig
def __init__( self : str , _A : Dict , **_A : Optional[Any] ):
"""simple docstring"""
requires_backends(self , '''timm''' )
super().__init__(_A )
__SCREAMING_SNAKE_CASE : List[str] = config
if config.backbone is None:
raise ValueError('''backbone is not set in the config. Please set it to a timm model name.''' )
if config.backbone not in timm.list_models():
raise ValueError(F'''backbone {config.backbone} is not supported by timm.''' )
if hasattr(_A , '''out_features''' ) and config.out_features is not None:
raise ValueError('''out_features is not supported by TimmBackbone. Please use out_indices instead.''' )
__SCREAMING_SNAKE_CASE : List[Any] = getattr(_A , '''use_pretrained_backbone''' , _A )
if pretrained is None:
raise ValueError('''use_pretrained_backbone is not set in the config. Please set it to True or False.''' )
# We just take the final layer by default. This matches the default for the transformers models.
__SCREAMING_SNAKE_CASE : Optional[int] = config.out_indices if getattr(_A , '''out_indices''' , _A ) is not None else (-1,)
__SCREAMING_SNAKE_CASE : Optional[int] = timm.create_model(
config.backbone , pretrained=_A , features_only=config.features_only , in_chans=config.num_channels , out_indices=_A , **_A , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
__SCREAMING_SNAKE_CASE : Optional[int] = self._backbone.return_layers
__SCREAMING_SNAKE_CASE : Tuple = {layer['''module''']: str(_A ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(_A )
@classmethod
def UpperCAmelCase__ ( cls : Union[str, Any] , _A : List[str] , *_A : Any , **_A : List[str] ):
"""simple docstring"""
requires_backends(cls , ['''vision''', '''timm'''] )
from ...models.timm_backbone import TimmBackboneConfig
__SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.pop('''config''' , TimmBackboneConfig() )
__SCREAMING_SNAKE_CASE : List[str] = kwargs.pop('''use_timm_backbone''' , _A )
if not use_timm:
raise ValueError('''use_timm_backbone must be True for timm backbones''' )
__SCREAMING_SNAKE_CASE : Tuple = kwargs.pop('''num_channels''' , config.num_channels )
__SCREAMING_SNAKE_CASE : Any = kwargs.pop('''features_only''' , config.features_only )
__SCREAMING_SNAKE_CASE : List[str] = kwargs.pop('''use_pretrained_backbone''' , config.use_pretrained_backbone )
__SCREAMING_SNAKE_CASE : List[Any] = kwargs.pop('''out_indices''' , config.out_indices )
__SCREAMING_SNAKE_CASE : List[Any] = TimmBackboneConfig(
backbone=_A , num_channels=_A , features_only=_A , use_pretrained_backbone=_A , out_indices=_A , )
return super()._from_config(_A , **_A )
def UpperCAmelCase__ ( self : Union[str, Any] , _A : Optional[int] ):
"""simple docstring"""
pass
def UpperCAmelCase__ ( self : Tuple , _A : int , _A : int=None , _A : List[str]=None , _A : Optional[int]=None , **_A : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
__SCREAMING_SNAKE_CASE : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__SCREAMING_SNAKE_CASE : Optional[Any] = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('''Cannot output attentions for timm backbones at the moment''' )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
__SCREAMING_SNAKE_CASE : int = self._all_layers
__SCREAMING_SNAKE_CASE : Optional[Any] = self._backbone(_A , **_A )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self._return_layers
__SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(hidden_states[i] for i in self.out_indices )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = self._backbone(_A , **_A )
__SCREAMING_SNAKE_CASE : List[str] = None
__SCREAMING_SNAKE_CASE : str = tuple(_A )
__SCREAMING_SNAKE_CASE : Dict = tuple(_A ) if hidden_states is not None else None
if not return_dict:
__SCREAMING_SNAKE_CASE : int = (feature_maps,)
if output_hidden_states:
__SCREAMING_SNAKE_CASE : int = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=_A , hidden_states=_A , attentions=_A )
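# Editorial usage sketch (the backbone name and config fields below are illustrative
# assumptions, not taken from this module):
#
#     config = TimmBackboneConfig(backbone="resnet18", out_indices=(1, 2, 3))
#     backbone = TimmBackbone(config)
#     outputs = backbone(pixel_values)      # returns a BackboneOutput
#     feature_maps = outputs.feature_maps   # one tensor per entry in out_indices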
| 303
|
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase_ = KandinskyVaaPriorPipeline
lowerCAmelCase_ = ['''prompt''']
lowerCAmelCase_ = ['''prompt''', '''negative_prompt''']
lowerCAmelCase_ = [
'''num_images_per_prompt''',
'''generator''',
'''num_inference_steps''',
'''latents''',
'''negative_prompt''',
'''guidance_scale''',
'''output_type''',
'''return_dict''',
]
lowerCAmelCase_ = False
@property
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
return 32
@property
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
return 32
@property
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
return self.time_input_dim
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
return 100
@property
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_A )
@property
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Dict = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 12,
'''embedding_dim''': self.text_embedder_hidden_size,
'''num_layers''': 1,
}
__SCREAMING_SNAKE_CASE : Optional[Any] = PriorTransformer(**_A )
# clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0; set clip_std to 1 so it does not
__SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : int = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
__SCREAMING_SNAKE_CASE : str = CLIPVisionModelWithProjection(_A )
return model
@property
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = CLIPImageProcessor(
crop_size=224 , do_center_crop=_A , do_normalize=_A , do_resize=_A , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
return image_processor
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_prior
__SCREAMING_SNAKE_CASE : str = self.dummy_image_encoder
__SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_text_encoder
__SCREAMING_SNAKE_CASE : List[Any] = self.dummy_tokenizer
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_image_processor
__SCREAMING_SNAKE_CASE : str = UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=_A , clip_sample_range=10.0 , )
__SCREAMING_SNAKE_CASE : int = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''scheduler''': scheduler,
'''image_processor''': image_processor,
}
return components
def UpperCAmelCase__ ( self : Union[str, Any] , _A : int , _A : Dict=0 ):
"""simple docstring"""
if str(_A ).startswith('''mps''' ):
__SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(_A )
else:
__SCREAMING_SNAKE_CASE : str = torch.Generator(device=_A ).manual_seed(_A )
__SCREAMING_SNAKE_CASE : List[str] = {
'''prompt''': '''horse''',
'''generator''': generator,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = '''cpu'''
__SCREAMING_SNAKE_CASE : Any = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Any = self.pipeline_class(**_A )
__SCREAMING_SNAKE_CASE : List[Any] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__SCREAMING_SNAKE_CASE : int = pipe(**self.get_dummy_inputs(_A ) )
__SCREAMING_SNAKE_CASE : Tuple = output.image_embeds
__SCREAMING_SNAKE_CASE : Optional[Any] = pipe(
**self.get_dummy_inputs(_A ) , return_dict=_A , )[0]
__SCREAMING_SNAKE_CASE : Tuple = image[0, -10:]
__SCREAMING_SNAKE_CASE : List[Any] = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
__SCREAMING_SNAKE_CASE : List[str] = np.array(
[-0.05_32, 1.71_20, 0.36_56, -1.08_52, -0.89_46, -1.17_56, 0.43_48, 0.24_82, 0.51_46, -0.11_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = torch_device == '''cpu'''
__SCREAMING_SNAKE_CASE : Any = True
__SCREAMING_SNAKE_CASE : int = False
self._test_inference_batch_single_identical(
test_max_difference=_A , relax_max_difference=_A , test_mean_pixel_difference=_A , )
@skip_mps
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = torch_device == '''cpu'''
__SCREAMING_SNAKE_CASE : List[Any] = False
self._test_attention_slicing_forward_pass(
test_max_difference=_A , test_mean_pixel_difference=_A , )
| 303
| 1
|
import logging
from transformers.configuration_utils import PretrainedConfig
snake_case : Optional[Any] = logging.getLogger(__name__)
class snake_case_ (A_ ):
UpperCAmelCase__ : int = "masked_bert"
def __init__( self :str ,__snake_case :int=3_05_22 ,__snake_case :Any=7_68 ,__snake_case :str=12 ,__snake_case :str=12 ,__snake_case :Any=30_72 ,__snake_case :Optional[int]="gelu" ,__snake_case :Tuple=0.1 ,__snake_case :Union[str, Any]=0.1 ,__snake_case :List[Any]=5_12 ,__snake_case :Tuple=2 ,__snake_case :Dict=0.02 ,__snake_case :str=1E-12 ,__snake_case :Tuple=0 ,__snake_case :Optional[Any]="topK" ,__snake_case :Optional[Any]="constant" ,__snake_case :str=0.0 ,**__snake_case :Tuple ,) -> Dict:
super().__init__(pad_token_id=snake_case__ ,**snake_case__ )
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = hidden_act
a__ = intermediate_size
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = initializer_range
a__ = layer_norm_eps
a__ = pruning_method
a__ = mask_init
a__ = mask_scale
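# Editorial sketch of the "topK" idea behind `pruning_method` (illustrative only; the
# real masked-linear modules live elsewhere in this research example):
#
#     import torch
#
#     def topk_mask(scores: torch.Tensor, keep_ratio: float) -> torch.Tensor:
#         # Keep the `keep_ratio` fraction of weights with the highest scores.
#         k = max(1, int(keep_ratio * scores.numel()))
#         threshold = scores.flatten().kthvalue(scores.numel() - k + 1).values
#         return (scores >= threshold).to(scores.dtype)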
| 360
|
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
snake_case : Tuple = logging.get_logger(__name__)
enable_full_determinism()
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : str = UNetaDModel
UpperCAmelCase__ : str = '''sample'''
@property
def lowerCamelCase__( self :Optional[int] ) -> List[str]:
a__ = 4
a__ = 3
a__ = (32, 32)
a__ = floats_tensor((batch_size, num_channels) + sizes ).to(__snake_case )
a__ = torch.tensor([10] ).to(__snake_case )
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase__( self :Tuple ) -> Tuple:
return (3, 32, 32)
@property
def lowerCamelCase__( self :List[str] ) -> Optional[Any]:
return (3, 32, 32)
def lowerCamelCase__( self :str ) -> Tuple:
a__ = {
'block_out_channels': (32, 64),
'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'),
'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'),
'attention_head_dim': 3,
'out_channels': 3,
'in_channels': 3,
'layers_per_block': 2,
'sample_size': 32,
}
a__ = self.dummy_input
return init_dict, inputs_dict
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : int = UNetaDModel
UpperCAmelCase__ : Any = '''sample'''
@property
def lowerCamelCase__( self :Dict ) -> List[str]:
a__ = 4
a__ = 4
a__ = (32, 32)
a__ = floats_tensor((batch_size, num_channels) + sizes ).to(__snake_case )
a__ = torch.tensor([10] ).to(__snake_case )
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase__( self :Any ) -> str:
return (4, 32, 32)
@property
def lowerCamelCase__( self :Any ) -> Dict:
return (4, 32, 32)
def lowerCamelCase__( self :int ) -> int:
a__ = {
'sample_size': 32,
'in_channels': 4,
'out_channels': 4,
'layers_per_block': 2,
'block_out_channels': (32, 64),
'attention_head_dim': 32,
'down_block_types': ('DownBlock2D', 'DownBlock2D'),
'up_block_types': ('UpBlock2D', 'UpBlock2D'),
}
a__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase__( self :str ) -> Any:
a__ , a__ = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' ,output_loading_info=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertEqual(len(loading_info['missing_keys'] ) ,0 )
model.to(__snake_case )
a__ = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' ,'This test is supposed to run on GPU' )
def lowerCamelCase__( self :Tuple ) -> Optional[int]:
a__ , a__ = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' ,output_loading_info=__snake_case )
model.to(__snake_case )
a__ = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' ,'This test is supposed to run on GPU' )
def lowerCamelCase__( self :Union[str, Any] ) -> int:
# by default model loading will use accelerate as `low_cpu_mem_usage=True`
a__ , a__ = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' ,output_loading_info=__snake_case )
model_accelerate.to(__snake_case )
model_accelerate.eval()
a__ = torch.randn(
1 ,model_accelerate.config.in_channels ,model_accelerate.config.sample_size ,model_accelerate.config.sample_size ,generator=torch.manual_seed(0 ) ,)
a__ = noise.to(__snake_case )
a__ = torch.tensor([10] * noise.shape[0] ).to(__snake_case )
a__ = model_accelerate(__snake_case ,__snake_case )['sample']
# the two models don't need to stay on the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
a__ , a__ = UNetaDModel.from_pretrained(
'fusing/unet-ldm-dummy-update' ,output_loading_info=__snake_case ,low_cpu_mem_usage=__snake_case )
model_normal_load.to(__snake_case )
model_normal_load.eval()
a__ = model_normal_load(__snake_case ,__snake_case )['sample']
assert torch_all_close(__snake_case ,__snake_case ,rtol=1E-3 )
def lowerCamelCase__( self :str ) -> Union[str, Any]:
a__ = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' )
model.eval()
model.to(__snake_case )
a__ = torch.randn(
1 ,model.config.in_channels ,model.config.sample_size ,model.config.sample_size ,generator=torch.manual_seed(0 ) ,)
a__ = noise.to(__snake_case )
a__ = torch.tensor([10] * noise.shape[0] ).to(__snake_case )
with torch.no_grad():
a__ = model(__snake_case ,__snake_case ).sample
a__ = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
a__ = torch.tensor([-13.32_58, -20.11_00, -15.98_73, -17.66_17, -23.05_96, -17.94_19, -13.36_75, -16.18_89, -12.38_00] )
# fmt: on
self.assertTrue(torch_all_close(__snake_case ,__snake_case ,rtol=1E-3 ) )
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : Dict = UNetaDModel
UpperCAmelCase__ : Optional[Any] = '''sample'''
@property
def lowerCamelCase__( self :Optional[Any] ,__snake_case :List[Any]=(32, 32) ) -> Optional[int]:
a__ = 4
a__ = 3
a__ = floats_tensor((batch_size, num_channels) + sizes ).to(__snake_case )
a__ = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa ,device=__snake_case )
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase__( self :Tuple ) -> Optional[int]:
return (3, 32, 32)
@property
def lowerCamelCase__( self :Optional[Any] ) -> Optional[int]:
return (3, 32, 32)
def lowerCamelCase__( self :Optional[Any] ) -> List[str]:
a__ = {
'block_out_channels': [32, 64, 64, 64],
'in_channels': 3,
'layers_per_block': 1,
'out_channels': 3,
'time_embedding_type': 'fourier',
'norm_eps': 1E-6,
'mid_block_scale_factor': math.sqrt(2.0 ),
'norm_num_groups': None,
'down_block_types': [
'SkipDownBlock2D',
'AttnSkipDownBlock2D',
'SkipDownBlock2D',
'SkipDownBlock2D',
],
'up_block_types': [
'SkipUpBlock2D',
'SkipUpBlock2D',
'AttnSkipUpBlock2D',
'SkipUpBlock2D',
],
}
a__ = self.dummy_input
return init_dict, inputs_dict
@slow
def lowerCamelCase__( self :str ) -> Tuple:
a__ , a__ = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' ,output_loading_info=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertEqual(len(loading_info['missing_keys'] ) ,0 )
model.to(__snake_case )
a__ = self.dummy_input
a__ = floats_tensor((4, 3) + (2_56, 2_56) ).to(__snake_case )
a__ = noise
a__ = model(**__snake_case )
assert image is not None, "Make sure output is not None"
@slow
def lowerCamelCase__( self :Union[str, Any] ) -> Dict:
a__ = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' )
model.to(__snake_case )
a__ = 4
a__ = 3
a__ = (2_56, 2_56)
a__ = torch.ones((batch_size, num_channels) + sizes ).to(__snake_case )
a__ = torch.tensor(batch_size * [1E-4] ).to(__snake_case )
with torch.no_grad():
a__ = model(__snake_case ,__snake_case ).sample
a__ = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
a__ = torch.tensor([-48_42.86_91, -64_99.66_31, -38_00.19_53, -79_78.26_86, -1_09_80.71_29, -2_00_28.85_35, 81_48.28_22, 23_42.29_05, 5_67.76_08] )
# fmt: on
self.assertTrue(torch_all_close(__snake_case ,__snake_case ,rtol=1E-2 ) )
def lowerCamelCase__( self :Dict ) -> int:
a__ = UNetaDModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update' )
model.to(__snake_case )
a__ = 4
a__ = 3
a__ = (32, 32)
a__ = torch.ones((batch_size, num_channels) + sizes ).to(__snake_case )
a__ = torch.tensor(batch_size * [1E-4] ).to(__snake_case )
with torch.no_grad():
a__ = model(__snake_case ,__snake_case ).sample
a__ = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
a__ = torch.tensor([-0.03_25, -0.09_00, -0.08_69, -0.03_32, -0.07_25, -0.02_70, -0.01_01, 0.02_27, 0.02_56] )
# fmt: on
self.assertTrue(torch_all_close(__snake_case ,__snake_case ,rtol=1E-2 ) )
def lowerCamelCase__( self :int ) -> str:
# not required for this model
pass
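# A standalone sketch of the load-and-denoise pattern the slow tests above
# exercise. This assumes `diffusers` and `torch` are installed and the dummy
# checkpoint is still reachable; `UNet2DModel` is the public class name
# (rendered with obfuscated digits elsewhere in this dump):
import torch
from diffusers import UNet2DModel

model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
model.eval()
noise = torch.randn(
    1, model.config.in_channels, model.config.sample_size, model.config.sample_size,
    generator=torch.manual_seed(0),
)
timesteps = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
    sample = model(noise, timesteps).sample  # single denoising forward pass
print(sample.shape)  # (1, in_channels, sample_size, sample_size)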
| 109
| 0
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class __lowercase :
"""simple docstring"""
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=32 , A=2 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=5_12 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> Tuple:
'''simple docstring'''
lowerCamelCase = parent
lowerCamelCase = 13
lowerCamelCase = 7
lowerCamelCase = True
lowerCamelCase = True
lowerCamelCase = True
lowerCamelCase = True
lowerCamelCase = 99
lowerCamelCase = 3_84
lowerCamelCase = 2
lowerCamelCase = 4
lowerCamelCase = 37
lowerCamelCase = """gelu"""
lowerCamelCase = 0.1
lowerCamelCase = 0.1
lowerCamelCase = 5_12
lowerCamelCase = 16
lowerCamelCase = 2
lowerCamelCase = 0.02
lowerCamelCase = 3
lowerCamelCase = 4
lowerCamelCase = 1_28
lowerCamelCase = 2
lowerCamelCase = 9
lowerCamelCase = 1
lowerCamelCase = None
def __A ( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase = None
if self.use_input_mask:
lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase = None
if self.use_token_type_ids:
lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase = None
lowerCamelCase = None
lowerCamelCase = None
if self.use_labels:
lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_A , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self , A , A , A , A , A , A , A ) -> int:
'''simple docstring'''
lowerCamelCase = TFConvBertModel(config=_A )
lowerCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowerCamelCase = [input_ids, input_mask]
lowerCamelCase = model(_A )
lowerCamelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , A , A , A , A , A , A , A ) -> List[str]:
'''simple docstring'''
lowerCamelCase = TFConvBertForMaskedLM(config=_A )
lowerCamelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowerCamelCase = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A , A , A , A , A , A , A ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = self.num_labels
lowerCamelCase = TFConvBertForSequenceClassification(config=_A )
lowerCamelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowerCamelCase = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self , A , A , A , A , A , A , A ) -> str:
'''simple docstring'''
lowerCamelCase = self.num_choices
lowerCamelCase = TFConvBertForMultipleChoice(config=_A )
lowerCamelCase = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
lowerCamelCase = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self , A , A , A , A , A , A , A ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = self.num_labels
lowerCamelCase = TFConvBertForTokenClassification(config=_A )
lowerCamelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowerCamelCase = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self , A , A , A , A , A , A , A ) -> str:
'''simple docstring'''
lowerCamelCase = TFConvBertForQuestionAnswering(config=_A )
lowerCamelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowerCamelCase = model(_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self ) -> Any:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class __lowercase ( __a , __a , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : str = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCamelCase : List[str] = (
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase : Optional[Any] = False
UpperCamelCase : int = False
UpperCamelCase : List[str] = False
def __A ( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase = TFConvBertModelTester(self )
lowerCamelCase = ConfigTester(self , config_class=_A , hidden_size=37 )
def __A ( self ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __A ( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def __A ( self ) -> str:
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_A )
def __A ( self ) -> int:
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
def __A ( self ) -> Dict:
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
def __A ( self ) -> int:
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase = True
lowerCamelCase = True
if hasattr(_A , """use_cache""" ):
lowerCamelCase = True
lowerCamelCase = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
lowerCamelCase = getattr(self.model_tester , """key_length""" , _A )
for model_class in self.all_model_classes:
lowerCamelCase = self._prepare_for_class(_A , _A )
lowerCamelCase = model_class(_A )
lowerCamelCase = len(model(_A ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A , saved_model=_A )
lowerCamelCase = os.path.join(_A , """saved_model""" , """1""" )
lowerCamelCase = tf.keras.models.load_model(_A )
lowerCamelCase = model(_A )
if self.is_encoder_decoder:
lowerCamelCase = outputs["""encoder_hidden_states"""]
lowerCamelCase = outputs["""encoder_attentions"""]
else:
lowerCamelCase = outputs["""hidden_states"""]
lowerCamelCase = outputs["""attentions"""]
self.assertEqual(len(_A ) , _A )
lowerCamelCase = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_A ) , _A )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __A ( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
self.assertIsNotNone(_A )
def __A ( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase = True
lowerCamelCase = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
lowerCamelCase = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
lowerCamelCase = getattr(self.model_tester , """key_length""" , _A )
lowerCamelCase = getattr(self.model_tester , """key_length""" , _A )
def check_decoder_attentions_output(A ):
lowerCamelCase = len(_A )
self.assertEqual(out_len % 2 , 0 )
lowerCamelCase = outputs.decoder_attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(A ):
lowerCamelCase = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
lowerCamelCase = True
lowerCamelCase = False
lowerCamelCase = model_class(_A )
lowerCamelCase = model(self._prepare_for_class(_A , _A ) )
lowerCamelCase = len(_A )
self.assertEqual(config.output_hidden_states , _A )
check_encoder_attentions_output(_A )
if self.is_encoder_decoder:
lowerCamelCase = model_class(_A )
lowerCamelCase = model(self._prepare_for_class(_A , _A ) )
self.assertEqual(config.output_hidden_states , _A )
check_decoder_attentions_output(_A )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
lowerCamelCase = True
lowerCamelCase = model_class(_A )
lowerCamelCase = model(self._prepare_for_class(_A , _A ) )
self.assertEqual(config.output_hidden_states , _A )
check_encoder_attentions_output(_A )
# Check attention is always last and order is fine
lowerCamelCase = True
lowerCamelCase = True
lowerCamelCase = model_class(_A )
lowerCamelCase = model(self._prepare_for_class(_A , _A ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_A ) )
self.assertEqual(model.config.output_hidden_states , _A )
check_encoder_attentions_output(_A )
@require_tf
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self ) -> int:
'''simple docstring'''
lowerCamelCase = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
lowerCamelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase = model(_A )[0]
lowerCamelCase = [1, 6, 7_68]
self.assertEqual(output.shape , _A )
lowerCamelCase = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _A , atol=1e-4 )
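# Standalone sketch of the SavedModel round-trip that the slow test above
# performs (assumes TensorFlow and `transformers` are installed; checkpoint
# name is the one used in the integration test):
import os
import tempfile

import tensorflow as tf
from transformers import TFConvBertModel

model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
inputs = {"input_ids": tf.constant([[0, 1, 2, 3, 4, 5]])}
with tempfile.TemporaryDirectory() as tmpdir:
    model.save_pretrained(tmpdir, saved_model=True)
    reloaded = tf.keras.models.load_model(os.path.join(tmpdir, "saved_model", "1"))
    outputs = reloaded(inputs)  # same dict-call pattern as the test
print(type(outputs))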
| 252
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=5_12, pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        '''simple docstring'''
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None):
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=True if self.has_pre_transformation else output_hidden_states, return_dict=return_dict, )
        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
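# The projection step above, isolated as a minimal sketch: a linear head maps
# encoder hidden states (hidden_size) to the projection space (project_dim).
# Tensor sizes here are illustrative stand-ins, not values from this file:
import torch
from torch import nn

hidden_size, project_dim = 768, 512
transformation = nn.Linear(hidden_size, project_dim)
last_hidden_state = torch.randn(1, 6, hidden_size)  # would come from the XLM-R encoder
projection_state = transformation(last_hidden_state)
print(projection_state.shape)  # torch.Size([1, 6, 512])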
| 246
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __init__( self : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int=3 , lowerCAmelCase_ : Tuple=3_2 , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : Dict=1_0 , lowerCAmelCase_ : Any=[1_0, 2_0, 3_0, 4_0] , lowerCAmelCase_ : int=[1, 1, 2, 1] , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Optional[Any]="relu" , lowerCAmelCase_ : str=3 , lowerCAmelCase_ : int=None , ):
"""simple docstring"""
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = image_size
lowercase_ = num_channels
lowercase_ = embeddings_size
lowercase_ = hidden_sizes
lowercase_ = depths
lowercase_ = is_training
lowercase_ = use_labels
lowercase_ = hidden_act
lowercase_ = num_labels
lowercase_ = scope
lowercase_ = len(lowerCAmelCase_)
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
lowercase_ = self.get_config()
return config, pixel_values
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int]):
"""simple docstring"""
lowercase_ = FlaxRegNetModel(config=lowerCAmelCase_)
lowercase_ = model(lowerCAmelCase_)
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
lowercase_ = self.num_labels
lowercase_ = FlaxRegNetForImageClassification(config=lowerCAmelCase_)
lowercase_ = model(lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = self.prepare_config_and_inputs()
lowercase_ , lowercase_ = config_and_inputs
lowercase_ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , unittest.TestCase ):
lowercase__ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
lowercase__ = False
lowercase__ = False
lowercase__ = False
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
lowercase_ = FlaxRegNetModelTester(self)
lowercase_ = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_)
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase ( self : str):
"""simple docstring"""
return
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_)
@unittest.skip(reason="""RegNet does not use inputs_embeds""")
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""")
def _UpperCAmelCase ( self : int):
"""simple docstring"""
pass
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(lowerCAmelCase_)
lowercase_ = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
def check_hidden_states_output(lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Any):
lowercase_ = model_class(lowerCAmelCase_)
lowercase_ = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_))
lowercase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase_ = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase_) , expected_num_stages + 1)
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
lowercase_ = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_)
lowercase_ = model_class(lowerCAmelCase_)
@jax.jit
def model_jitted(lowerCAmelCase_ : Optional[int] , **lowerCAmelCase_ : Union[str, Any]):
return model(pixel_values=lowerCAmelCase_ , **lowerCAmelCase_)
with self.subTest("""JIT Enabled"""):
lowercase_ = model_jitted(**lowerCAmelCase_).to_tuple()
with self.subTest("""JIT Disabled"""):
with jax.disable_jit():
lowercase_ = model_jitted(**lowerCAmelCase_).to_tuple()
self.assertEqual(len(lowerCAmelCase_) , len(lowerCAmelCase_))
for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
def _SCREAMING_SNAKE_CASE () -> int:
'''simple docstring'''
lowercase_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""facebook/regnet-y-040""") if is_vision_available() else None
@slow
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = FlaxRegNetForImageClassification.from_pretrained("""facebook/regnet-y-040""")
lowercase_ = self.default_image_processor
lowercase_ = prepare_img()
lowercase_ = image_processor(images=lowerCAmelCase_ , return_tensors="""np""")
lowercase_ = model(**lowerCAmelCase_)
# verify the logits
lowercase_ = (1, 1_0_0_0)
self.assertEqual(outputs.logits.shape , lowerCAmelCase_)
lowercase_ = jnp.array([-0.4_180, -1.5_051, -3.4_836])
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4))
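# Standalone sketch of the Flax integration test above (assumes flax, PIL,
# and network access to the facebook/regnet-y-040 checkpoint; the image path
# is the fixture used by the test):
from PIL import Image
from transformers import AutoImageProcessor
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="np")
logits = model(**inputs).logits
print(logits.shape)  # (1, 1000) ImageNet class logits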
| 313
|
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    '''simple docstring'''
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    '''simple docstring'''
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
"No of Comparisons for 100 elements selected from a standard normal distribution"
"is :"
)
print(z)
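# A small deterministic self-check of the counting quicksort above (uses the
# functions as reconstructed in this file; the array values are arbitrary):
check = np.array([3.0, 1.0, 4.0, 1.5, 9.0, 2.6])
check_count = _in_place_quick_sort(check, 0, len(check) - 1)
assert all(check[i] <= check[i + 1] for i in range(len(check) - 1))
print("self-check sorted:", check, "| comparisons:", check_count)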
| 313
| 1
|
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
"huggingface/informer-tourism-monthly": (
"https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = '''informer'''
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
        '''num_hidden_layers''': '''encoder_layers''',
    }

    def __init__( self , prediction_length = None , context_length = None , distribution_output = "student_t" , loss = "nll" , input_size = 1 , lags_sequence = None , scaling = "mean" , num_dynamic_real_features = 0 , num_static_categorical_features = 0 , num_static_real_features = 0 , num_time_features = 0 , cardinality = None , embedding_dimension = None , d_model = 64 , encoder_ffn_dim = 32 , decoder_ffn_dim = 32 , encoder_attention_heads = 2 , decoder_attention_heads = 2 , encoder_layers = 2 , decoder_layers = 2 , is_encoder_decoder = True , activation_function = "gelu" , dropout = 0.05 , encoder_layerdrop = 0.1 , decoder_layerdrop = 0.1 , attention_dropout = 0.1 , activation_dropout = 0.1 , num_parallel_samples = 100 , init_std = 0.02 , use_cache=True , attention_type = "prob" , sampling_factor = 5 , distil = True , **kwargs , ) -> None:
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`" )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence ) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )

    @property
    def _number_of_features( self ) -> int:
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
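# Minimal construction sketch for the config above (values are illustrative;
# the remaining fields fall back to the defaults in the signature):
if __name__ == "__main__":
    config = InformerConfig(prediction_length=24, num_time_features=2)
    print(config.context_length, config.d_model, config.attention_type)  # 24 64 prob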
| 231
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
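# The module above defers all heavy imports until first attribute access. A
# toy sketch of the same lazy-import idea (module and attribute names below
# are hypothetical stand-ins, not the transformers internals):
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        module = importlib.import_module(self._attr_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ runs only once per name
        return value

# e.g. TinyLazyModule("pkg", {"math": ["sqrt"], "json": ["dumps"]}).sqrt(2.0)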
| 231
| 1
|
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    """simple docstring"""
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--big_bird_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_trivia_qa', action='store_true', help='Whether to convert a model with a trivia_qa head.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
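# Example invocation of the conversion script above (the script filename and
# all paths below are placeholders, not values taken from this repository):
#
#   python convert_bigbird_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/bigbird/model.ckpt \
#       --big_bird_config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model \
#       --is_trivia_qa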
| 93
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_luke': ['LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LukeConfig'],
'tokenization_luke': ['LukeTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
'LUKE_PRETRAINED_MODEL_ARCHIVE_LIST',
'LukeForEntityClassification',
'LukeForEntityPairClassification',
'LukeForEntitySpanClassification',
'LukeForMultipleChoice',
'LukeForQuestionAnswering',
'LukeForSequenceClassification',
'LukeForTokenClassification',
'LukeForMaskedLM',
'LukeModel',
'LukePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
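# Quick usage sketch for the exports above (checkpoint name is an assumption:
# "studio-ousia/luke-base" is the canonical LUKE checkpoint; entity_spans is
# the character-span API that LukeTokenizer adds on top of a plain tokenizer):
from transformers import LukeModel, LukeTokenizer

tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base")
model = LukeModel.from_pretrained("studio-ousia/luke-base")
text = "Beyoncé lives in Los Angeles."
entity_spans = [(0, 7)]  # character span of "Beyoncé"
inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)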
| 93
| 1
|
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_ckpt' , type=str , default='microsoft/unixcoder-base-nine')
    parser.add_argument('--num_epochs' , type=int , default=5)
    parser.add_argument('--batch_size' , type=int , default=6)
    parser.add_argument('--gradient_accumulation_steps' , type=int , default=1)
    parser.add_argument('--freeze' , type=bool , default=True)
    parser.add_argument('--learning_rate' , type=float , default=5E-4)
    parser.add_argument('--seed' , type=int , default=0)
    parser.add_argument('--lr_scheduler_type' , type=str , default='cosine')
    parser.add_argument('--num_warmup_steps' , type=int , default=10)
    parser.add_argument('--weight_decay' , type=float , default=0.01)
    parser.add_argument('--output_dir' , type=str , default='./results')
    return parser.parse_args()


metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions , labels = eval_pred
    predictions = np.argmax(predictions , axis=1)
    return metric.compute(predictions=predictions , references=labels)


class CustomCallback(TrainerCallback):
    '''simple docstring'''
    def __init__( self , trainer ) -> None:
        super().__init__()
        self._trainer = trainer
    def on_epoch_end( self , args , state , control , **kwargs ):
        if control.should_evaluate:
            control_copy = deepcopy(control )
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='train' )
            return control_copy


def main():
    args = get_args()
    set_seed(args.seed)
    dataset = load_dataset('codeparrot/codecomplex' , split='train')
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test['test'].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            'train': train_test['train'],
            'test': test_validation['train'],
            'valid': test_validation['test'],
        })
    print('Loading tokenizer and model')
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id
    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False
    labels = ClassLabel(num_classes=7 , names=list(set(train_test_validation['train']['complexity'])))
    def tokenize(example):
        inputs = tokenizer(example['src'] , truncation=True , max_length=10_24)
        label = labels.str2int(example['complexity'])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }
    tokenized_datasets = train_test_validation.map(
        tokenize , batched=True , remove_columns=train_test_validation['train'].column_names , )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
    training_args = TrainingArguments(
        output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='epoch' , save_strategy='epoch' , logging_strategy='epoch' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model='accuracy' , run_name='complexity-java' , report_to='wandb' , )
    trainer = Trainer(
        model=model , args=training_args , train_dataset=tokenized_datasets['train'] , eval_dataset=tokenized_datasets['valid'] , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )
    print('Training...')
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
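# Example invocation (the script filename is a placeholder; the flags are the
# ones defined in get_args above, shown with their defaults):
#
#   python train_complexity_predictor.py \
#       --model_ckpt microsoft/unixcoder-base-nine \
#       --num_epochs 5 \
#       --batch_size 6 \
#       --output_dir ./results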
| 178
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
lowercase = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
"GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXForCausalLM",
"GPTNeoXForQuestionAnswering",
"GPTNeoXForSequenceClassification",
"GPTNeoXForTokenClassification",
"GPTNeoXLayer",
"GPTNeoXModel",
"GPTNeoXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
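# Quick generation sketch for the exports above. Assumption: the reference
# checkpoint "EleutherAI/gpt-neox-20b" (very large; any compatible GPT-NeoX
# checkpoint works the same way):
from transformers import GPTNeoXForCausalLM, GPTNeoXTokenizerFast

tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/gpt-neox-20b")
inputs = tokenizer("Hello, my name is", return_tensors="pt")
tokens = model.generate(**inputs, max_new_tokens=10)
print(tokenizer.decode(tokens[0]))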
| 178
| 1
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Matrix:
    '''simple docstring'''

    def __init__(self, row, column, default_value=0):
        '''simple docstring'''
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self):
        '''simple docstring'''
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self):
        '''simple docstring'''
        return str(self)

    def validate_indicies(self, loc):
        '''simple docstring'''
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc):
        '''simple docstring'''
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc, value):
        '''simple docstring'''
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another):
        '''simple docstring'''
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        '''simple docstring'''
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        '''simple docstring'''
        return self + (-another)

    def __mul__(self, another):
        '''simple docstring'''
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self):
        '''simple docstring'''
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        '''simple docstring'''
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def test1():
        """simple docstring"""
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2():
        """simple docstring"""
        import doctest
        doctest.testmod()

    test2()
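    # Cross-check of the Sherman-Morrison identity used above, done with NumPy
    # and independent of the Matrix class (requires numpy):
    #   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
    import numpy as np

    a_inv = np.eye(3)  # A = I, so A^(-1) = I
    u_np = np.array([[1.0], [2.0], [-3.0]])
    v_np = np.array([[4.0], [-2.0], [5.0]])
    denom = 1.0 + (v_np.T @ a_inv @ u_np)[0, 0]
    sm = a_inv - (a_inv @ u_np @ v_np.T @ a_inv) / denom
    direct = np.linalg.inv(np.linalg.inv(a_inv) + u_np @ v_np.T)
    assert np.allclose(sm, direct)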
| 356
|
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
UpperCamelCase_ = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
UpperCamelCase_ = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
UpperCamelCase_ = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Value('string', id='sequence' ),
'references': datasets.Value('string', id='sequence' ),
} ), codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'], reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
], )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        '''simple docstring'''
        if rouge_types is None:
            rouge_types = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
        return result
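# Direct use of the underlying scorer that _compute wraps above (requires the
# rouge_score package; strings are illustrative):
from rouge_score import rouge_scorer as _rs

_scorer = _rs.RougeScorer(rouge_types=["rouge1", "rougeL"], use_stemmer=True)
_score = _scorer.score("hello there", "hello there")
print(_score["rouge1"].fmeasure)  # 1.0 for identical target and prediction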
| 246
| 0
|
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


class IFSafetyChecker(PreTrainedModel):
    """simple docstring"""

    config_class = CLIPConfig
    _no_split_modules = ['CLIPEncoderLayer']

    def __init__(self, config: CLIPConfig):
        '''simple docstring'''
        super().__init__(config)
        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        '''simple docstring'''
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()
        if any(nsfw_detected):
            logger.warning(
                'Potential NSFW content was detected in one or more images. A black image will be returned instead.'
                ' Try again with a different prompt and/or seed.')
        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()
        if any(watermark_detected):
            logger.warning(
                'Potential watermarked content was detected in one or more images. A black image will be returned instead.'
                ' Try again with a different prompt and/or seed.')
        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
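# The core score-threshold-blackout pattern above, shown in isolation with
# NumPy only (scores and image shapes are illustrative stand-ins):
import numpy as _np

_scores = _np.array([0.2, 0.7, 0.4])              # stand-in per-image head outputs
_images = [_np.ones((8, 8, 3)) for _ in _scores]
_flagged = (_scores > 0.5).tolist()               # p_threshold / w_threshold analogue
for _idx, _hit in enumerate(_flagged):
    if _hit:
        _images[_idx] = _np.zeros(_images[_idx].shape)  # replace with a black image
print(_flagged)  # [False, True, False]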
| 7
|
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def euler_modified(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float):
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y_pred = y[k] + step_size * ode_func(x, y[k])  # Euler predictor
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_pred))
        )  # trapezoidal corrector
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
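    # Worked example for the integrator above (signature as reconstructed:
    # euler_modified(ode_func, y0, x0, step_size, x_end)). For dy/dx = y with
    # y(0) = 1, the value at x = 1 should approach e = 2.71828...:
    ys = euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)
    print(ys[-1])  # ~ 2.714 with h = 0.1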
| 109
| 0
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class _a ( __lowercase , __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : int = StableDiffusionControlNetImgaImgPipeline
_lowerCamelCase : int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
_lowerCamelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_lowerCamelCase : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'control_image'} )
_lowerCamelCase : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)


class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet1.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet2.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        controlnet = MultiControlNetModel([controlnet1, controlnet2])
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_1 = pipe(**inputs)[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_2 - output_3)) > 1e-3
        assert np.sum(np.abs(output_3 - output_4)) > 1e-3
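    # Reading aid (not part of the upstream test): `control_guidance_start` and
    # `control_guidance_end` are fractions of the denoising schedule during which
    # each ControlNet is applied. Scalars broadcast to every ControlNet, while a
    # list gives one window per ControlNet, which is what the calls above exercise.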
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png").resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png").resize((512, 512))
        output = pipe(
            prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6, )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy")
        assert np.abs(expected_image - image).max() < 9e-2
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether `cp` is the codepoint of a CJK character."""
    # This covers the CJK Unified Ideographs blocks and their extensions,
    # plus the CJK Compatibility Ideographs blocks.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
    return False
def is_chinese(word: str):
    # A word counts as Chinese only if every one of its characters is a CJK character.
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max(len(w) for w in chinese_word_set)
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            # Try the longest possible segmented word first, shrinking down to length 2.
            max_match = min(end - start, max_word_len)
            for i in range(max_match, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
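# Worked example (illustrative, not from the original script): with
# bert_tokens = ["中", "国", "人"] and chinese_word_set = {"中国"}, the longest
# match "中国" found at position 0 rewrites the list to ["中", "##国", "人"]:
# "国" is marked as the continuation of a whole word, while "人" remains a
# stand-alone single-character word.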
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save positions of Chinese subwords starting with ##, i.e. subwords that continue a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
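# The returned structure holds one list per input line: the positions, within the
# BERT tokenization of that line, of single Chinese characters that continue a
# whole word (the tokens rewritten to "##x" above). Downstream whole-word masking
# uses these indices to mask a word's subwords together.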
def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
)
parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
    args = parser.parse_args()
main(args)
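# Example invocation (script name and paths are illustrative; the flags are the
# ones defined above):
#   python prepare_chinese_ref.py --file_name=./resources/chinese-demo.txt \
#       --ltp=./resources/ltp --bert=./resources/robert --save_path=./resources/ref.txt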
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
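    # Reading aid (not part of the upstream test): tracing the model with jax.jit
    # must be a pure optimization; the jitted and eagerly executed calls above
    # have to agree in the number and shapes of their outputs.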
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")
        outputs = model(**inputs)
        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
]
    required_optional_params = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005, )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_0, self.block_out_channels_0 * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
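    # Reading aid (assumed img2img semantics, not part of the upstream test):
    # `strength` sets how far into the noise schedule the init image is pushed;
    # a low value such as the 0.2 used here adds little noise, so the output
    # stays close to the structure of `init_image`.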
    def test_kandinsky_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy")
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        prompt = "A red cartoon frog, 4k"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_embeds, zero_image_embeds = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple()
        output = pipeline(
            prompt, image=init_image, image_embeds=image_embeds, negative_image_embeds=zero_image_embeds, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type="np", )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
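# mBART-50 reserves one token id per language code beyond the shared vocabulary;
# the two constants above are the ids of "en_XX" and "ro_RO" for the checkpoints
# exercised in these tests.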
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def lowercase ( self : int ):
_UpperCAmelCase = "<s>"
_UpperCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(__a ) , 1_0_5_4 )
def lowercase ( self : Optional[int] ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_5_4 )
def lowercase ( self : int ):
_UpperCAmelCase = MBartaaTokenizer(__a , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=__a )
_UpperCAmelCase = tokenizer.tokenize("This is a test" )
self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
_UpperCAmelCase = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__a , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )
@slow
def lowercase ( self : Optional[Any] ):
_UpperCAmelCase = {"input_ids": [[2_5_0_0_0_4, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [2_5_0_0_0_4, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_0_0_0_4, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="facebook/mbart-large-50" , revision="d3913889c59cd5c9e456b269c376325eabad57e2" , )
def lowercase ( self : int ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_UpperCAmelCase = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_UpperCAmelCase = self.tokenizer_class.from_pretrained(__a , **__a )
_UpperCAmelCase = tempfile.mkdtemp()
_UpperCAmelCase = tokenizer_r.save_pretrained(__a )
_UpperCAmelCase = tokenizer_p.save_pretrained(__a )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
_UpperCAmelCase = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(__a , __a )
# Checks everything loads correctly in the same way
_UpperCAmelCase = tokenizer_r.from_pretrained(__a )
_UpperCAmelCase = tokenizer_p.from_pretrained(__a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__a )
# Save tokenizer rust, legacy_format=True
_UpperCAmelCase = tempfile.mkdtemp()
_UpperCAmelCase = tokenizer_r.save_pretrained(__a , legacy_format=__a )
_UpperCAmelCase = tokenizer_p.save_pretrained(__a )
# Checks it save with the same files
self.assertSequenceEqual(__a , __a )
# Checks everything loads correctly in the same way
_UpperCAmelCase = tokenizer_r.from_pretrained(__a )
_UpperCAmelCase = tokenizer_p.from_pretrained(__a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a ) )
shutil.rmtree(__a )
# Save tokenizer rust, legacy_format=False
_UpperCAmelCase = tempfile.mkdtemp()
_UpperCAmelCase = tokenizer_r.save_pretrained(__a , legacy_format=__a )
_UpperCAmelCase = tokenizer_p.save_pretrained(__a )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_UpperCAmelCase = tokenizer_r.from_pretrained(__a )
_UpperCAmelCase = tokenizer_p.from_pretrained(__a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a ) )
shutil.rmtree(__a )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
    ]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO")
        cls.pad_token_id = 1
        return cls
def lowercase ( self : Optional[int] ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 2_5_0_0_2_0 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"] , 2_5_0_0_3_8 )
def lowercase ( self : str ):
_UpperCAmelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __a )
def lowercase ( self : str ):
self.assertIn(__a , self.tokenizer.all_special_ids )
_UpperCAmelCase = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
_UpperCAmelCase = self.tokenizer.decode(__a , skip_special_tokens=__a )
_UpperCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__a )
self.assertEqual(__a , __a )
self.assertNotIn(self.tokenizer.eos_token , __a )
def lowercase ( self : Dict ):
_UpperCAmelCase = ["this is gunna be a long sentence " * 2_0]
assert isinstance(src_text[0] , __a )
_UpperCAmelCase = 1_0
_UpperCAmelCase = self.tokenizer(__a , max_length=__a , truncation=__a ).input_ids[0]
self.assertEqual(ids[0] , __a )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(__a ) , __a )
def lowercase ( self : int ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [2_5_0_0_5_3, 2_5_0_0_0_1] )
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__a , return_tensors="pt" )
_UpperCAmelCase = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def lowercase ( self : List[Any] ):
_UpperCAmelCase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__a , truncation=__a , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
_UpperCAmelCase = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(__a , __a )
self.assertEqual((2, 1_4) , batch.input_ids.shape )
self.assertEqual((2, 1_4) , batch.attention_mask.shape )
_UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __a )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = self.tokenizer(self.src_text , padding=__a , truncation=__a , max_length=3 , return_tensors="pt" )
_UpperCAmelCase = self.tokenizer(
text_target=self.tgt_text , padding=__a , truncation=__a , max_length=1_0 , return_tensors="pt" )
_UpperCAmelCase = targets["input_ids"]
_UpperCAmelCase = shift_tokens_right(__a , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def lowercase ( self : List[Any] ):
_UpperCAmelCase = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" )
self.assertEqual(
nested_simplify(__a ) , {
# en_XX, A, test, EOS
"input_ids": [[2_5_0_0_0_4, 6_2, 3_0_3_4, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 2_5_0_0_0_1,
} , )
'''simple docstring'''
def UpperCAmelCase_ ( __lowercase : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
'''simple docstring'''
_UpperCAmelCase = set()
# Replace all the whitespace in our sentence
_UpperCAmelCase = input_str.replace(" " , "" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(__lowercase ) == 26
def UpperCAmelCase_ ( __lowercase : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
'''simple docstring'''
_UpperCAmelCase = [False] * 26
for char in input_str:
if char.islower():
_UpperCAmelCase = True
elif char.isupper():
_UpperCAmelCase = True
return all(__lowercase )
def UpperCAmelCase_ ( __lowercase : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
'''simple docstring'''
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
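# All three checks are O(n) in the input length; they differ only in constant
# factors, as the benchmark below shows. For example,
# is_pangram_fastest("The quick brown fox jumps over the lazy dog") builds one
# 26-element set in a single pass and returns True.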
def benchmark() -> None:
    """Benchmark code comparing the different versions."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
'''simple docstring'''
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
_lowercase : Union[str, Any] = "Usage of script: script_name <size_of_canvas:int>"
_lowercase : Union[str, Any] = [0] * 1_0_0 + [1] * 1_0
random.shuffle(choice)
def create_canvas(size: int) -> list[list[bool]]:
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas
def seed(canvas: list[list[bool]]) -> None:
    # Randomly set each cell of the canvas to alive or dead.
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))
def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Runs the rules of the game through all points and returns the next
    generation on a canvas of the same size."""
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2])
    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas
def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1
    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1
    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
    return state
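# The branching above encodes Conway's rules: a live cell dies with fewer than
# two live neighbours (underpopulation) or more than three (overpopulation),
# survives with two or three, and a dead cell becomes alive with exactly three
# live neighbours (reproduction).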
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class TestClass(unittest.TestCase):
    def test_sorted(self):
        """kp.calc_profit takes (profit, weight, max_weight) and returns the
        expected maximum profit."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        """Raises ValueError for any negative max_weight value."""
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        """Raises ValueError for any negative weight value."""
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        """Raises ValueError for any negative profit value."""
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        """Raises ValueError for a zero max_weight value."""
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        """Raises an error when profit and weight lists differ in length."""
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same.")
if __name__ == "__main__":
unittest.main()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
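# Lazy-import pattern: `_import_structure` below maps each submodule to its
# public names, and the `_LazyModule` installed at the bottom of this file only
# imports a backend-heavy submodule (torch/tf/flax) when one of its attributes
# is first accessed.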
_lowercase : Optional[Any] = {"""configuration_vit""": ["""VIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTConfig""", """ViTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = ["""ViTFeatureExtractor"""]
_lowercase : Optional[int] = ["""ViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
_lowercase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]
    def __str__(self) -> str:
        s = f"Matrix consists of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
def __repr__( self : Tuple )-> str:
"""simple docstring"""
return str(self )
    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)
    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
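    # Sherman-Morrison rank-1 update, as implemented above:
    #   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
    # `self` plays the role of A^(-1), so refreshing the inverse costs O(n^2)
    # matrix work instead of the O(n^3) of inverting from scratch.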
# Testing
if __name__ == "__main__":
    def test1() -> None:
        # a^(-1) starts as the identity matrix
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import (
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        LayoutLMv3ForQuestionAnswering,
        LayoutLMv3ForSequenceClassification,
        LayoutLMv3ForTokenClassification,
        LayoutLMv3Model,
    )
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
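        # Editorial worked example of the sizes above: with the default
        # image_size=4 and patch_size=2 there are (4 // 2) ** 2 = 4 patches,
        # plus 1 CLS token, so image_seq_length == 5 and seq_length == 7 + 5 == 12.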
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3Model(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
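
    # Editorial sketch (not part of the original test file): the dict built above
    # can drive a model directly. The standalone usage below is illustrative and
    # assumes a CPU run; `None` stands in for the usual unittest parent.
    #
    #   tester = LayoutLMv3ModelTester(None)
    #   config, inputs_dict = tester.prepare_config_and_inputs_for_common()
    #   model = LayoutLMv3Model(config).eval()
    #   outputs = model(**inputs_dict)
    #   print(outputs.last_hidden_state.shape)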
@require_torch
class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMv3Model,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3ForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMv3ForQuestionAnswering, "feature-extraction": LayoutLMv3Model}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
        return inputs_dict
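
    # Editorial note with a concrete instance of the label shapes above: for the
    # defaults (batch_size=2, text_seq_length=7), a token-classification class
    # receives labels of shape (2, 7), while sequence classification gets (2,).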
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
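
# Editorial usage note: a file like this is normally driven by pytest; the path
# below follows the transformers repository layout and is an assumption here.
#
#   RUN_SLOW=1 pytest tests/models/layoutlmv3/test_modeling_layoutlmv3.py -q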
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
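
# Editorial sketch: the projection built above shares its storage with the
# embedding, which is what ties the LM head to the token embeddings. A quick
# self-contained check (names illustrative, not part of the original script):
#
#   emb = nn.Embedding(10, 4)
#   lin = make_linear_from_emb(emb)
#   assert lin.weight.data_ptr() == emb.weight.data_ptr()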
def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
if __name__ == "__main__":
lowerCamelCase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
lowerCamelCase__ : str = parser.parse_args()
lowerCamelCase__ : Optional[int] = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
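
# Editorial usage example (the script filename and paths are placeholders):
#
#   python convert_mbart_original_checkpoint_to_pytorch.py \
#       /path/to/model.pt ./mbart-converted \
#       --hf_config facebook/mbart-large-cc25 --finetuned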
"""simple docstring"""
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
TEST_FLUSH = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
TEST_STRAIGHT = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
    ('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
    ('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
    ('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
    ('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
TEST_TYPES = (
    ('JH AH TH KH QH', 23),
    ('JH 9H TH KH QH', 22),
    ('JC KH JS JD JH', 21),
    ('KH KC 3S 3H 3D', 20),
    ('8C 9C 5C 3C TC', 19),
    ('JS QS 9H TS KH', 18),
    ('7C 7S KH 2H 7H', 17),
    ('3C KH 5D 5S KH', 16),
    ('QH 8H KD JH 8S', 15),
    ('2D 6D 9D TH 7D', 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))
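
# Editorial worked example of the index trick above: SORTED_HANDS is ordered
# weakest to strongest, so for play=5, oppo=3 the index is
# (5 >= 3) + (5 > 3) = 1 + 1 = 2 -> 'Win'; equal indices give 1 + 0 = 1 -> 'Tie'.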
@pytest.mark.parametrize('hand, expected', TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize('hand, expected', TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize('hand, expected, card_values', TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize('hand, expected', TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize('hand, expected', TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize('hand, other, expected', TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize('hand, other, expected', generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand('2D AC 3H 4H 5S'), PokerHand('2S 3H 4H 5S 6C')]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    pokerhand = PokerHand('2C 4S AS 3D 5C')
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, 'poker_hands.txt')
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == 'Win':
                answer += 1
    assert answer == 376
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # reduce the amount of console output from TF
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
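
# Editorial usage note (the path is an assumption; in the transformers
# repository this environment-dump script lives at utils/print_env.py):
#
#   python utils/print_env.py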