import inspect
import os
import sys
import unittest

import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu


class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
"""simple docstring"""
from math import sqrt
def _UpperCAmelCase ( __lowerCamelCase : int = 1_00_00_00 ) -> int:
_snake_case = 0
_snake_case = 0
_snake_case = 42
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(__lowerCamelCase , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(F"{solution() = }")
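The `min(...) - max(...) + 1` expression counts, in closed form, the pairs (a, b) with a + b = sum_shortest_sides and 1 <= a <= b <= max_cuboid_size. A quick brute-force cross-check of that closed form (names and ranges here are illustrative, not part of the original file):

def count_pairs_brute_force(s: int, m: int) -> int:
    """Count pairs (a, b) with a + b == s and 1 <= a <= b <= m."""
    return sum(1 for a in range(1, m + 1) for b in range(a, m + 1) if a + b == s)


for s in range(2, 21):
    for m in range(1, 12):
        closed_form = max(0, min(m, s // 2) - max(1, s - m) + 1)
        assert closed_form == count_pairs_brute_force(s, m)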
from math import log2


def lowest_set_bit_index(number: int) -> int:
    """Return the zero-based index of the lowest set bit of `number` (0 for 0)."""
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(number, float):
        raise TypeError("Input value must be a 'int' type")
    return 0 if (number == 0) else int(log2(number & -number))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
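The `number & -number` expression isolates the lowest set bit via two's-complement arithmetic, and `log2` of that power of two gives its index. A short illustrative check (the function name follows the cleaned-up version above; it was lost in the original dump):

for n in [1, 2, 3, 8, 12, 80]:
    isolated = n & -n  # keeps only the lowest set bit
    assert isolated == 2 ** lowest_set_bit_index(n)
    print(f"{n:3d} = {n:08b} -> lowest set bit at index {lowest_set_bit_index(n)}")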
import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        """A tiny UNet2DModel for fast tests."""
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_full_loop(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import logging

import numpy as np
import pytest
from scipy.linalg import eigh

logging.basicConfig(level=logging.INFO, format="%(message)s")


def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a row Numpy array into a column Numpy array."""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix inside each class."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]


def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix between the classes."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]


def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project the dataset onto its first `dimensions` principal components."""
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then take only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Project the dataset onto `dimensions` discriminant directions; requires classes > dimensions."""
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError("Did not raise AssertionError for dimensions > classes")
    assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError


if __name__ == "__main__":
    import doctest

    doctest.testmod()
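A minimal usage sketch of `principal_component_analysis` on a toy dataset (the variable names and numbers below are illustrative, not from the original test suite):

rng = np.random.default_rng(0)
# 3 features x 50 samples (features are rows, samples are columns)
toy_features = rng.normal(size=(3, 50))
projection = principal_component_analysis(toy_features, dimensions=2)
print(projection.shape)  # (2, 50): each sample reduced to 2 components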
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple

import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs

from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger()


@dataclass
class Tracker:
    """Records the leaf modules a model executes during a forward pass."""

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    """Copies weights from `src` to `dest` by matching traced operations one-to-one."""

    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")


class FakeRegNetVisslWrapper(nn.Module):
    """Wraps a VISSL trunk so its forward pass mimics a classy-vision model."""

    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )


class NameToFromModelFuncMap(dict):
    """Maps a model name to a function returning the loaded source model; falls back to timm."""

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)

        return val


class NameToOurModelFuncMap(dict):
    """Maps a model name to our model class (`RegNetModel` or `RegNetForImageClassification`)."""

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
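A minimal sketch of how `Tracker`/`ModuleTransfer` behave, using two identically shaped toy networks (everything here is illustrative, not part of the conversion script):

src_net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.Conv2d(8, 8, 3))
dest_net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.Conv2d(8, 8, 3))

transfer = ModuleTransfer(src=src_net, dest=dest_net)
transfer(torch.randn(1, 3, 32, 32))  # traces both nets and copies weights pairwise

assert torch.equal(src_net[0].weight, dest_net[0].weight)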
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict


def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

        if from_state_dict is not None:
            keys = []
            # for seer - in1k finetuned we have to manually copy the head
            if "seer" in name and "in1k" in name:
                keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
            to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
            our_model.load_state_dict(to_state_dict)

        our_outputs = our_model(x, output_hidden_states=True)
        our_output = (
            our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
        )

        from_output = from_model(x)
        from_output = from_output[-1] if type(from_output) is list else from_output

        # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's
        # just check the last hidden state
        if "seer" in name and "in1k" in name:
            our_output = our_outputs.hidden_states[-1]

        assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
        "regnet-x-002": ImageNetPreTrainedConfig(
            depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type="x"
        ),
        "regnet-x-004": ImageNetPreTrainedConfig(
            depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type="x"
        ),
        "regnet-x-006": ImageNetPreTrainedConfig(
            depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type="x"
        ),
        "regnet-x-008": ImageNetPreTrainedConfig(
            depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type="x"
        ),
        "regnet-x-016": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type="x"
        ),
        "regnet-x-032": ImageNetPreTrainedConfig(
            depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1008], groups_width=48, layer_type="x"
        ),
        "regnet-x-040": ImageNetPreTrainedConfig(
            depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40, layer_type="x"
        ),
        "regnet-x-064": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1624], groups_width=56, layer_type="x"
        ),
        "regnet-x-080": ImageNetPreTrainedConfig(
            depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1920], groups_width=120, layer_type="x"
        ),
        "regnet-x-120": ImageNetPreTrainedConfig(
            depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112, layer_type="x"
        ),
        "regnet-x-160": ImageNetPreTrainedConfig(
            depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2048], groups_width=128, layer_type="x"
        ),
        "regnet-x-320": ImageNetPreTrainedConfig(
            depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1344, 2520], groups_width=168, layer_type="x"
        ),
        # y variant
        "regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8),
        "regnet-y-004": ImageNetPreTrainedConfig(depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8),
        "regnet-y-006": ImageNetPreTrainedConfig(depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16),
        "regnet-y-008": ImageNetPreTrainedConfig(depths=[1, 3, 8, 2], hidden_sizes=[64, 128, 320, 768], groups_width=16),
        "regnet-y-016": ImageNetPreTrainedConfig(depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24),
        "regnet-y-032": ImageNetPreTrainedConfig(
            depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 1512], groups_width=24
        ),
        "regnet-y-040": ImageNetPreTrainedConfig(
            depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1088], groups_width=64
        ),
        "regnet-y-064": ImageNetPreTrainedConfig(
            depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1296], groups_width=72
        ),
        "regnet-y-080": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2016], groups_width=56
        ),
        "regnet-y-120": ImageNetPreTrainedConfig(
            depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112
        ),
        "regnet-y-160": ImageNetPreTrainedConfig(
            depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1232, 3024], groups_width=112
        ),
        "regnet-y-320": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232
        ),
        # models created by SEER -> https://arxiv.org/abs/2202.08360
        "regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232),
        "regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328),
        "regnet-y-1280-seer": RegNetConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264
        ),
        "regnet-y-2560-seer": RegNetConfig(
            depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640
        ),
        "regnet-y-10b-seer": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010
        ),
        # finetuned on imagenet
        "regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232
        ),
        "regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328
        ),
        "regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264
        ),
        "regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
            depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640
        ),
        "regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010
        ),
    }
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()

    # add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]

    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    if model_name:
        config = names_to_config[model_name]
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            config,
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported regnet* architecture,'''
''' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
lowerCamelCase = parser.parse_args()
lowerCamelCase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
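A hedged usage sketch: converting a single architecture locally requires the timm weights above to be downloadable, and the output folder name here is mine, not from the script.

# Convert one model and keep it local, skipping the Hub push:
convert_weights_and_push(Path("./converted"), model_name="regnet-x-002", push_to_hub=False)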
import os
from glob import glob

import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn

from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil


class ProcessorGradientFlow:
    """
    Wraps CLIP text tokenization and image preprocessing so the image transforms stay differentiable and gradients
    can flow back to the pixels.
    """

    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ):
        """Edit images in VQGAN latent space, guided by CLIP similarity to text prompts."""
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        """Stitch the saved intermediate images into a gif."""
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)"
            )
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")

        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            # hold the first and last frames a little longer
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")

    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        img = preprocess(Image.open(path), target_image_size=256).to(self.device)
        img = preprocess_vqgan(img)
        z, *_ = self.vqgan.encode(img)
        return z
    def _add_vector(self, transform_vector):
        """Add a transform vector to the current latent and decode the result."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)

    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss

    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector
    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log({"Original Image": wandb.Image(image)})

    def process_prompts(self, prompts):
        """Parse "text:weight" prompts (separated by "|") into prompt and weight tensors."""
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }
    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        """Optimize the latent toward the positive prompts (and away from the negative ones)."""
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for step, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{step:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{step:03d}_final.png"))
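A rough usage sketch, assuming the local `loaders.load_vqgan` helper can find a trained VQGAN checkpoint; the paths and prompts are illustrative, and the "text:weight" syntax follows `process_prompts` above:

# editor = VQGAN_CLIP(iterations=30)
# editor.generate(
#     pos_prompts="a smiling face:1.0 | blue eyes:0.5",
#     neg_prompts="blurry:1.0",
#     image_path="./input_face.png",
#     show_intermediate=False,
#     save_intermediate=True,
# )
# editor.make_animation(input_path=editor.save_path, output_path="./edit.gif")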
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
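Because of the `_LazyModule` indirection, the sentencepiece-backed tokenizer is only imported on first attribute access; roughly (module path as in transformers):

# import transformers.models.bartpho as bartpho   # cheap: nothing heavy is imported yet
# tok_cls = bartpho.BartphoTokenizer              # first attribute access triggers the real import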
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
    "BridgeTower/bridgetower-base-itm-mlm": (
        "https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
    ),
}


class BridgeTowerVisionConfig(PretrainedConfig):
    """Configuration of the vision tower of a BridgeTower model."""

    model_type = "bridgetower_vision_model"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        stop_gradient=False,
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerTextConfig(PretrainedConfig):
    """Configuration of the text encoder of a BridgeTower model (a RoBERTa-style transformer)."""

    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerConfig(PretrainedConfig):
    """Configuration of the full BridgeTower model, composed of a text and a vision sub-configuration."""

    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        # TODO: remove this once the Hub files are updated.
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")

        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(
        cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs
    ):
        """Instantiate a BridgeTowerConfig from a text and a vision configuration."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize this instance to a dictionary, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
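A short usage sketch composing the three configs (default values only; nothing model-specific assumed):

text_config = BridgeTowerTextConfig()
vision_config = BridgeTowerVisionConfig(image_size=384)
config = BridgeTowerConfig.from_text_vision_configs(text_config, vision_config)
print(config.to_dict()["vision_config"]["image_size"])  # 384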
from ...configuration_utils import PretrainedConfig


TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/tapas-base-finetuned-sqa": (
        "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wtq": (
        "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wikisql-supervised": (
        "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-tabfact": (
        "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
    ),
}


class TapasConfig(PretrainedConfig):
    """Configuration for TAPAS: BERT-style encoder hyperparameters plus table-QA fine-tuning hyperparameters."""

    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
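For illustration, a WTQ-style configuration with four aggregation operations (the labels follow the TAPAS paper; treat the exact values as an example, not canonical checkpoint settings):

config = TapasConfig(
    num_aggregation_labels=4,
    aggregation_labels={0: "NONE", 1: "SUM", 2: "AVERAGE", 3: "COUNT"},
    no_aggregation_label_index=0,
    use_answer_as_supervision=True,
)
print(config.aggregation_labels[2])  # "AVERAGE"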
import argparse
import json
import pickle
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config


def create_rename_keys(config):
    rename_keys = []
    # stem
    # fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
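# Quick sanity sketch for rename_key (hypothetical key names, not from a real checkpoint):
#   d = {"backbone.norm0.weight": 1}
#   rename_key(d, "backbone.norm0.weight", "model.pixel_level_module.encoder.hidden_states_norms.0.weight")
#   assert "backbone.norm0.weight" not in d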
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
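# The split above assumes the fused Swin projection stacks [query; key; value] along
# dim 0, so a weight of shape (3 * dim, dim) yields rows [0:dim] for query,
# [dim:2*dim] for key and [-dim:] for value; the bias is sliced the same way.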
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="maskformer-swin-tiny-ade",
type=str,
help=("Name of the MaskFormer model you'd like to convert",),
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
type=str,
help="Path to the original state dict (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 134
| 1
|
'''simple docstring'''
from __future__ import annotations
def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")
def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
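# Minimal sketch of the API above (assumes vertices 0..2 and the edge-dict format used here):
#   graph = [{"src": 0, "dst": 1, "weight": 2}, {"src": 1, "dst": 2, "weight": 3}]
#   bellman_ford(graph, vertex_count=3, edge_count=2, src=0)  # -> [0.0, 2.0, 5.0]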
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 223
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "RUCAIBox/mvp": 1024,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>",
                 eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>",
                 mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token,
            mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        """
        `str`: Mask token, to use when training a model with masked-language modeling. Logs an error if used while
        not having been set.
        """
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
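# Minimal usage sketch (checkpoint name from the map above; ids illustrative):
#   tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
#   tokenizer("Hello world")["input_ids"]  # -> ids wrapped with <s> ... </s>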
| 223
| 1
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
17_9459,
12_4850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
1_0114,
711,
152,
20,
6,
5,
2_2376,
642,
1221,
1_5190,
3_4153,
450,
5608,
959,
1119,
5_7702,
136,
186,
47,
1098,
2_9367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
5_0901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCamelCase_: Tuple = {'input_ids': [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = UpperCamelCase_  # the encoding dict assigned above
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="xlm-roberta-base",
            revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3",
        )
| 369
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=246534, n_positions=256, n_embd=1280, dff=8192, n_layer=48, n_head=16,
                 resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-6, initializer_range=0.02,
                 use_cache=True, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
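# Usage sketch: the attribute_map above lets generic config names resolve to CTRL-specific ones.
#   config = CTRLConfig(n_layer=2, n_head=4)
#   assert config.num_hidden_layers == 2  # aliased to n_layer via attribute_map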
| 223
| 0
|
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(i): i for i in self.__need()}
    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break
    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
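# Example run with the module-level test tables above (any truthy keyword triggers the
# pretty-printed tables first; "describe" is just an illustrative name):
#   BankersAlgorithm(
#       test_claim_vector, test_allocated_res_table, test_maximum_claim_table
#   ).main(describe=True)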
if __name__ == "__main__":
import doctest
doctest.testmod()
| 86
|
"""simple docstring"""
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num
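# e.g. solution(15) returns 26, since 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26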
if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
| 194
| 0
|
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}
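# Usage sketch (mirrors the datasets documentation for translation features):
#   feature = Translation(languages=["en", "fr"])
#   example = {"en": "the cat", "fr": "le chat"}  # exactly one string per language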
@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self):
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
| 351
|
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 155
| 0
|
'''simple docstring'''
# fmt: off
MORSE_CODE_DICT = {
"A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
"H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
"O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
"V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
"2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
"8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
"?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
"(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())
def main() -> None:
    message = "Morse code here!"
    print(message)

    message = encrypt(message)
    print(message)

    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
| 158
|
'''simple docstring'''
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum
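# e.g. max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) -> 6, from the slice [4, -1, 2, 1].
# With allow_empty_subarrays=True, an all-negative input yields 0 (the empty subarray)
# instead of its largest single element.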
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f'''{max_subarray_sum(nums) = }''')
| 158
| 1
|
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip


logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
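# e.g. metric_max_over_ground_truths(exact_match_score, "Paris", ["paris", "Lyon"])
# scores one prediction against every gold answer and keeps the best match.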
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions, return_tensors="pt", padding=True, truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids, question_enc_pool_output.cpu().detach().to(torch.float32).numpy(), prefix=rag_model.rag.generator.config.prefix, n_docs=rag_model.config.n_docs, return_tensors="pt", )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids, attention_mask=attention_mask, num_beams=args.num_beams, min_length=args.min_length, max_length=args.max_length, early_stopping=False, num_return_sequences=1, bad_words_ids=[[0, 0]], )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str, help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ), )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type", )
    parser.add_argument(
        "--index_path", default=None, type=str, help="Path to the retrieval index", )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained checkpoints or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str, help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ), )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples", )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples", )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"], help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ), )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt", help="Name of the predictions file, to be stored in the checkpoints directory", )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", )
    parser.add_argument(
        "--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.", )
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true", )
    parser.add_argument(
        "--num_beams", default=4, type=int, help="Number of beams to be used when generating answers", )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating.", )
    parser.add_argument(
        "--print_docs", action="store_true", help="If True, prints docs retried while generating.", )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration
    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info("Evaluate the following checkpoints: %s", checkpoints)
    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue
        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))
        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)
        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()
            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
    main(args)
| 369
|
'''simple docstring'''
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
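# e.g. maximum_non_adjacent_sum([1, 2, 3]) -> 4 (pick 1 and 3)
#      maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6]) -> 18 (pick 5, 7 and 6)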
if __name__ == "__main__":
import doctest
doctest.testmod()
| 236
| 0
|
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Tuple , lowercase_ : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST)
SCREAMING_SNAKE_CASE_ : Any = cfg.INPUT.FORMAT
SCREAMING_SNAKE_CASE_ : List[str] = cfg.SIZE_DIVISIBILITY
SCREAMING_SNAKE_CASE_ : List[str] = cfg.PAD_VALUE
SCREAMING_SNAKE_CASE_ : List[Any] = cfg.INPUT.MAX_SIZE_TEST
SCREAMING_SNAKE_CASE_ : List[Any] = cfg.MODEL.DEVICE
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
SCREAMING_SNAKE_CASE_ : Dict = lambda lowercase_: (x - self.pixel_mean) / self.pixel_std
def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = tuple(max(lowercase_) for s in zip(*[img.shape for img in images]))
SCREAMING_SNAKE_CASE_ : List[str] = [im.shape[-2:] for im in images]
SCREAMING_SNAKE_CASE_ : int = [
nn.functional.pad(
lowercase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(lowercase_ , lowercase_)
]
return torch.stack(lowercase_), torch.tensor(lowercase_)
def __call__( self : Tuple , lowercase_ : Optional[int] , lowercase_ : List[Any]=False):
'''simple docstring'''
with torch.no_grad():
if not isinstance(lowercase_ , lowercase_):
SCREAMING_SNAKE_CASE_ : Optional[Any] = [images]
if single_image:
assert len(lowercase_) == 1
for i in range(len(lowercase_)):
if isinstance(images[i] , torch.Tensor):
images.insert(lowercase_ , images.pop(lowercase_).to(self.device).float())
elif not isinstance(images[i] , torch.Tensor):
images.insert(
lowercase_ , torch.as_tensor(img_tensorize(images.pop(lowercase_) , input_format=self.input_format))
.to(self.device)
.float() , )
# resize smallest edge
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([im.shape[:2] for im in images])
SCREAMING_SNAKE_CASE_ : Optional[int] = self.aug(lowercase_)
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.normalizer(lowercase_) for x in images]
# now pad them to do the following operations
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.pad(lowercase_)
            # pad sizes to a multiple of size_divisibility (not implemented here)
if self.size_divisibility > 0:
raise NotImplementedError()
            # per-image (y, x) ratios of original to resized size (used to rescale boxes later)
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.true_divide(lowercase_ , lowercase_)
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def _A (__a , __a ) -> Dict:
"""simple docstring"""
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def _A (__a , __a ) -> Any:
"""simple docstring"""
assert torch.isfinite(__a ).all(), "Box tensor contains infinite or NaN!"
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = box_size
tensor[:, 0].clamp_(min=0 , max=__a )
tensor[:, 1].clamp_(min=0 , max=__a )
tensor[:, 2].clamp_(min=0 , max=__a )
tensor[:, 3].clamp_(min=0 , max=__a )
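# The ResizeShortestEdge logic above keeps the aspect ratio while forcing the
# shorter edge to `size` and capping the longer edge at `max_size`. The same
# arithmetic as a standalone sketch (pure Python, no tensors):
def shortest_edge_resize_dims(h, w, size, max_size):
    scale = size * 1.0 / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    if max(newh, neww) > max_size:  # cap the longer edge, rescaling both
        cap = max_size * 1.0 / max(newh, neww)
        newh, neww = newh * cap, neww * cap
    return int(newh + 0.5), int(neww + 0.5)

assert shortest_edge_resize_dims(480, 640, 800, 1333) == (800, 1067)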
| 91
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
"UniSpeechForCTC",
"UniSpeechForPreTraining",
"UniSpeechForSequenceClassification",
"UniSpeechModel",
"UniSpeechPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 45
| 0
|
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
a__ : Any = datasets.utils.logging.get_logger(__name__)
@dataclass
class UpperCAmelCase__ ( datasets.BuilderConfig):
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = "utf-8"
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = True # deprecated
__SCREAMING_SNAKE_CASE = None # deprecated
__SCREAMING_SNAKE_CASE = 1_0 << 2_0 # 10MB
__SCREAMING_SNAKE_CASE = None
class UpperCAmelCase__ ( datasets.ArrowBasedBuilder):
__SCREAMING_SNAKE_CASE = JsonConfig
def __lowerCamelCase ( self ) -> Any:
if self.config.block_size is not None:
logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
__UpperCamelCase = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"""The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.""" )
if self.config.newlines_in_values is not None:
raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
return datasets.DatasetInfo(features=self.config.features )
def __lowerCamelCase ( self , lowercase ) -> List[Any]:
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" )
__UpperCamelCase = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCAmelCase__ , (str, list, tuple) ):
__UpperCamelCase = data_files
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__UpperCamelCase = [files]
__UpperCamelCase = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
__UpperCamelCase = []
for split_name, files in data_files.items():
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__UpperCamelCase = [files]
__UpperCamelCase = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"""files""": files} ) )
return splits
def __lowerCamelCase ( self , lowercase ) -> pa.Table:
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
__UpperCamelCase = self.config.features.arrow_schema.field(lowerCAmelCase__ ).type
__UpperCamelCase = pa_table.append_column(lowerCAmelCase__ , pa.array([None] * len(lowerCAmelCase__ ) , type=lowerCAmelCase__ ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
__UpperCamelCase = table_cast(lowerCAmelCase__ , self.config.features.arrow_schema )
return pa_table
def __lowerCamelCase ( self , lowercase ) -> Any:
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(lowerCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
__UpperCamelCase = json.load(lowerCAmelCase__ )
# We keep only the field we are interested in
__UpperCamelCase = dataset[self.config.field]
                # We accept two formats: a list of dicts or a dict of lists
if isinstance(lowerCAmelCase__ , (list, tuple) ):
__UpperCamelCase = set().union(*[row.keys() for row in dataset] )
__UpperCamelCase = {col: [row.get(lowerCAmelCase__ ) for row in dataset] for col in keys}
else:
__UpperCamelCase = dataset
__UpperCamelCase = pa.Table.from_pydict(lowerCAmelCase__ )
yield file_idx, self._cast_table(lowerCAmelCase__ )
# If the file has one json object per line
else:
with open(lowerCAmelCase__ , """rb""" ) as f:
__UpperCamelCase = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
__UpperCamelCase = max(self.config.chunksize // 3_2 , 1_6 << 1_0 )
__UpperCamelCase = (
self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
)
while True:
__UpperCamelCase = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(lowerCAmelCase__ )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
__UpperCamelCase = batch.decode(self.config.encoding , errors=lowerCAmelCase__ ).encode("""utf-8""" )
try:
while True:
try:
__UpperCamelCase = paj.read_json(
io.BytesIO(lowerCAmelCase__ ) , read_options=paj.ReadOptions(block_size=lowerCAmelCase__ ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(lowerCAmelCase__ , pa.ArrowInvalid )
and "straddling" not in str(lowerCAmelCase__ )
or block_size > len(lowerCAmelCase__ )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f"Batch of {len(lowerCAmelCase__ )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}." )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
lowerCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
__UpperCamelCase = json.load(lowerCAmelCase__ )
except json.JSONDecodeError:
logger.error(f"Failed to read file \'{file}\' with error {type(lowerCAmelCase__ )}: {e}" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): # list is the only sequence type supported in JSON
try:
__UpperCamelCase = set().union(*[row.keys() for row in dataset] )
__UpperCamelCase = {col: [row.get(lowerCAmelCase__ ) for row in dataset] for col in keys}
__UpperCamelCase = pa.Table.from_pydict(lowerCAmelCase__ )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f"Failed to read file \'{file}\' with error {type(lowerCAmelCase__ )}: {e}" )
raise ValueError(f"Not able to read records in the JSON file at {file}." ) from None
yield file_idx, self._cast_table(lowerCAmelCase__ )
break
else:
logger.error(f"Failed to read file \'{file}\' with error {type(lowerCAmelCase__ )}: {e}" )
raise ValueError(
f"Not able to read records in the JSON file at {file}. "
f"You should probably indicate the field of the JSON file containing your records. "
f"This JSON file contain the following fields: {str(list(dataset.keys() ) )}. "
f"Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. " ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ )
batch_idx += 1
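# A simplified sketch of the chunked-read technique implemented above: read a
# fixed-size chunk, extend it to the next newline so no JSON record is split,
# let pyarrow parse it, and double block_size whenever a record straddles a
# parser block (the loader's error handling and feature casting are omitted).
import io
import pyarrow as pa
import pyarrow.json as paj

def read_jsonl_in_chunks(path, chunksize=10 << 20):
    with open(path, "rb") as f:
        while True:
            batch = f.read(chunksize)
            if not batch:
                break
            batch += f.readline()  # finish the current line
            block_size = max(chunksize // 32, 16 << 10)
            while True:
                try:
                    yield paj.read_json(
                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                    )
                    break
                except (pa.ArrowInvalid, pa.ArrowNotImplementedError):
                    if block_size > len(batch):
                        raise
                    block_size *= 2  # a record straddled a block; retry with a bigger one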
| 366
|
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
a__ : Dict = ''
a__ : List[str] = ''
a__ : Optional[Any] = ''
a__ : Any = ''
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = tweepy.OAuthHandler(__A ,__A )
auth.set_access_token(__A ,__A )
__UpperCamelCase = tweepy.API(__A )
# initialize a list to hold all the tweepy Tweets
__UpperCamelCase = []
# make initial request for most recent tweets (200 is the maximum allowed count)
__UpperCamelCase = api.user_timeline(screen_name=__A ,count=200 )
# save most recent tweets
alltweets.extend(__A )
# save the id of the oldest tweet less one
__UpperCamelCase = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(__A ) > 0:
print(f"getting tweets before {oldest}" )
# all subsequent requests use the max_id param to prevent duplicates
__UpperCamelCase = api.user_timeline(
screen_name=__A ,count=200 ,max_id=__A )
# save most recent tweets
alltweets.extend(__A )
# update the id of the oldest tweet less one
__UpperCamelCase = alltweets[-1].id - 1
print(f"...{len(__A )} tweets downloaded so far" )
# transform the tweepy tweets into a 2D array that will populate the csv
__UpperCamelCase = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f"new_{screen_name}_tweets.csv" ,"""w""" ) as f:
__UpperCamelCase = csv.writer(__A )
writer.writerow(["""id""", """created_at""", """text"""] )
writer.writerows(__A )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('FirePing32')
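# The download loop above pages backwards through a timeline with max_id.
# The same pattern in isolation; `fetch_page` is a hypothetical callable that
# returns items newest-first, each carrying an integer `id` attribute:
def paginate_by_max_id(fetch_page, page_size=200):
    items = []
    page = fetch_page(count=page_size)
    while len(page) > 0:
        items.extend(page)
        oldest = page[-1].id - 1  # ask only for items strictly older than the last one
        page = fetch_page(count=page_size, max_id=oldest)
    return items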
| 243
| 0
|
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
lowercase : List[str] = logging.get_logger(__name__)
class lowerCamelCase__ ( __lowercase):
'''simple docstring'''
def __init__( self :Dict , *a :Optional[int] , **a :List[str] ) -> None:
warnings.warn(
"The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use OwlViTImageProcessor instead." , a , )
super().__init__(*a , **a )
| 232
|
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
lowercase : Optional[Any] = TypeVar('T')
class lowerCamelCase__ ( Generic[T]):
'''simple docstring'''
_A = 42 # Cache store of keys
_A = 42 # References of the keys in cache
_A = 1_0 # Maximum capacity of cache
def __init__( self :Optional[Any] , a :int ) -> None:
__UpperCamelCase : Union[str, Any] = deque()
__UpperCamelCase : str = set()
if not n:
__UpperCamelCase : Union[str, Any] = sys.maxsize
elif n < 0:
raise ValueError("n should be an integer greater than 0." )
else:
__UpperCamelCase : Any = n
def _lowerCamelCase ( self :Tuple , a :T ) -> None:
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
__UpperCamelCase : int = self.dq_store.pop()
self.key_reference.remove(a )
else:
self.dq_store.remove(a )
self.dq_store.appendleft(a )
self.key_reference.add(a )
def _lowerCamelCase ( self :Any ) -> None:
for k in self.dq_store:
print(a )
def __repr__( self :Tuple ) -> str:
return f'LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase : LRUCache[str | int] = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
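# The class above implements the same eviction policy with a deque plus a set;
# the standard library's OrderedDict gives O(1) promotion instead of the
# deque's O(n) remove(). A minimal sketch reproducing the ordering checked by
# the assert above (most recently used kept at the front):
from collections import OrderedDict

class OrderedDictLRU:
    def __init__(self, capacity=10):
        self.capacity = capacity
        self.store = OrderedDict()

    def refer(self, key):
        if key in self.store:
            self.store.move_to_end(key, last=False)  # promote to most recent
        else:
            if len(self.store) == self.capacity:
                self.store.popitem(last=True)  # evict the least recently used
            self.store[key] = None
            self.store.move_to_end(key, last=False)

cache = OrderedDictLRU(4)
for key in ['A', 2, 3, 'A', 4, 5]:
    cache.refer(key)
assert list(cache.store) == [5, 4, 'A', 3]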
| 232
| 1
|
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
__SCREAMING_SNAKE_CASE = """src/diffusers"""
__SCREAMING_SNAKE_CASE = """."""
# This is to make sure the diffusers module imported is the one in the repo.
__SCREAMING_SNAKE_CASE = importlib.util.spec_from_file_location(
"""diffusers""",
os.path.join(DIFFUSERS_PATH, """__init__.py"""),
submodule_search_locations=[DIFFUSERS_PATH],
)
__SCREAMING_SNAKE_CASE = spec.loader.load_module()
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
return line.startswith(_lowerCamelCase ) or len(_lowerCamelCase ) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$" , _lowerCamelCase ) is not None
def UpperCAmelCase ( _lowerCamelCase ):
A : int = object_name.split("." )
A : List[str] = 0
# First let's find the module where our object lives.
A : Optional[Any] = parts[i]
while i < len(_lowerCamelCase ) and not os.path.isfile(os.path.join(_lowerCamelCase , f"""{module}.py""" ) ):
i += 1
if i < len(_lowerCamelCase ):
A : int = os.path.join(_lowerCamelCase , parts[i] )
if i >= len(_lowerCamelCase ):
raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(_lowerCamelCase , f"""{module}.py""" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
A : List[str] = f.readlines()
# Now let's find the class / func in the code!
A : Optional[int] = ""
A : int = 0
for name in parts[i + 1 :]:
while (
line_index < len(_lowerCamelCase ) and re.search(Rf"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(_lowerCamelCase ):
raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
A : Dict = line_index
while line_index < len(_lowerCamelCase ) and _should_continue(lines[line_index] , _lowerCamelCase ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
A : Optional[Any] = lines[start_index:line_index]
return "".join(_lowerCamelCase )
__SCREAMING_SNAKE_CASE = re.compile(r"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""")
__SCREAMING_SNAKE_CASE = re.compile(r"""^\s*(\S+)->(\S+)(\s+.*|$)""")
__SCREAMING_SNAKE_CASE = re.compile(r"""<FILL\s+[^>]*>""")
def UpperCAmelCase ( _lowerCamelCase ):
A : Optional[int] = code.split("\n" )
A : Dict = 0
while idx < len(_lowerCamelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(_lowerCamelCase ):
return re.search(R"^(\s*)\S" , lines[idx] ).groups()[0]
return ""
def UpperCAmelCase ( _lowerCamelCase ):
A : Union[str, Any] = len(get_indent(_lowerCamelCase ) ) > 0
if has_indent:
A : Dict = f"""class Bla:\n{code}"""
A : Optional[int] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=_lowerCamelCase )
A : Union[str, Any] = black.format_str(_lowerCamelCase , mode=_lowerCamelCase )
A , A : List[str] = style_docstrings_in_code(_lowerCamelCase )
return result[len("class Bla:\n" ) :] if has_indent else result
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase=False ):
with open(_lowerCamelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
A : Dict = f.readlines()
A : List[str] = []
A : Any = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(_lowerCamelCase ):
A : List[str] = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
A , A , A : Tuple = search.groups()
A : Dict = find_code_in_diffusers(_lowerCamelCase )
A : Optional[int] = get_indent(_lowerCamelCase )
A : str = line_index + 1 if indent == theoretical_indent else line_index + 2
A : str = theoretical_indent
A : Optional[int] = start_index
    # Loop to check the observed code, stop when indentation diminishes or if we see an End copy comment.
A : Tuple = True
while line_index < len(_lowerCamelCase ) and should_continue:
line_index += 1
if line_index >= len(_lowerCamelCase ):
break
A : str = lines[line_index]
A : int = _should_continue(_lowerCamelCase , _lowerCamelCase ) and re.search(f"""^{indent}# End copy""" , _lowerCamelCase ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
A : int = lines[start_index:line_index]
A : int = "".join(_lowerCamelCase )
# Remove any nested `Copied from` comments to avoid circular copies
A : List[str] = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(_lowerCamelCase ) is None]
A : Tuple = "\n".join(_lowerCamelCase )
# Before comparing, use the `replace_pattern` on the original code.
if len(_lowerCamelCase ) > 0:
A : Optional[Any] = replace_pattern.replace("with" , "" ).split("," )
A : Tuple = [_re_replace_pattern.search(_lowerCamelCase ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
A , A , A : str = pattern.groups()
A : Tuple = re.sub(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if option.strip() == "all-casing":
A : Dict = re.sub(obja.lower() , obja.lower() , _lowerCamelCase )
A : Dict = re.sub(obja.upper() , obja.upper() , _lowerCamelCase )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
A : List[Any] = blackify(lines[start_index - 1] + theoretical_code )
A : str = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
A : int = lines[:start_index] + [theoretical_code] + lines[line_index:]
A : List[Any] = start_index + 1
if overwrite and len(_lowerCamelCase ) > 0:
# Warn the user a file has been modified.
print(f"""Detected changes, rewriting {filename}.""" )
with open(_lowerCamelCase , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(_lowerCamelCase )
return diffs
def UpperCAmelCase ( _lowerCamelCase = False ):
A : List[str] = glob.glob(os.path.join(_lowerCamelCase , "**/*.py" ) , recursive=_lowerCamelCase )
A : Optional[int] = []
for filename in all_files:
A : List[str] = is_copy_consistent(_lowerCamelCase , _lowerCamelCase )
diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
if not overwrite and len(_lowerCamelCase ) > 0:
A : int = "\n".join(_lowerCamelCase )
raise Exception(
"Found the following copy inconsistencies:\n"
+ diff
+ "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
__SCREAMING_SNAKE_CASE = parser.parse_args()
check_copies(args.fix_and_overwrite)
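# A quick demonstration of the "Copied from" marker the script above scans for.
# The three regex groups are the indent, the dotted source object, and the
# optional "with A->B" replacement suffix (the object path below is illustrative):
import re

_re_copy_demo = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
demo_line = "    # Copied from diffusers.models.attention.BasicTransformerBlock with Basic->Demo"
assert _re_copy_demo.search(demo_line).groups() == (
    "    ",
    "models.attention.BasicTransformerBlock",
    "with Basic->Demo",
)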
| 256
|
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class lowerCamelCase_ :
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : Any ) -> Optional[Any]:
raise NotImplementedError()
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[Any]:
raise NotImplementedError()
class lowerCamelCase_ ( _A ):
'''simple docstring'''
def __init__( self : Optional[Any] , __lowerCamelCase : "AutoTokenizer" , __lowerCamelCase : bool = False , **__lowerCamelCase : Optional[Any] ) -> Optional[int]:
A : str = tokenizer
A : Tuple = skip_prompt
A : Optional[Any] = decode_kwargs
# variables used in the streaming process
A : Any = []
A : Tuple = 0
A : List[str] = True
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Optional[Any] ) -> int:
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError("TextStreamer only supports batch size 1" )
elif len(value.shape ) > 1:
A : List[str] = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
A : Tuple = False
return
        # Add the new token to the cache and decode the entire thing.
self.token_cache.extend(value.tolist() )
A : str = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith("\n" ):
A : int = text[self.print_len :]
A : Union[str, Any] = []
A : Any = 0
# If the last token is a CJK character, we print the characters.
elif len(__lowerCamelCase ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
A : Optional[int] = text[self.print_len :]
self.print_len += len(__lowerCamelCase )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
A : Union[str, Any] = text[self.print_len : text.rfind(" " ) + 1]
self.print_len += len(__lowerCamelCase )
self.on_finalized_text(__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[str]:
# Flush the cache, if it exists
if len(self.token_cache ) > 0:
A : Optional[int] = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
A : Optional[Any] = text[self.print_len :]
A : Optional[int] = []
A : List[str] = 0
else:
A : List[Any] = ""
A : Union[str, Any] = True
self.on_finalized_text(__lowerCamelCase , stream_end=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : bool = False ) -> List[str]:
print(__lowerCamelCase , flush=__lowerCamelCase , end="" if not stream_end else None )
def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : int ) -> Dict:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if (
(cp >= 0X4_e_0_0 and cp <= 0X9_f_f_f)
or (cp >= 0X3_4_0_0 and cp <= 0X4_d_b_f) #
or (cp >= 0X2_0_0_0_0 and cp <= 0X2_a_6_d_f) #
or (cp >= 0X2_a_7_0_0 and cp <= 0X2_b_7_3_f) #
or (cp >= 0X2_b_7_4_0 and cp <= 0X2_b_8_1_f) #
or (cp >= 0X2_b_8_2_0 and cp <= 0X2_c_e_a_f) #
or (cp >= 0Xf_9_0_0 and cp <= 0Xf_a_f_f)
or (cp >= 0X2_f_8_0_0 and cp <= 0X2_f_a_1_f) #
): #
return True
return False
class lowerCamelCase_ ( _A ):
'''simple docstring'''
def __init__( self : Any , __lowerCamelCase : "AutoTokenizer" , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[float] = None , **__lowerCamelCase : Dict ) -> Any:
super().__init__(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
A : Tuple = Queue()
A : Dict = None
A : List[str] = timeout
def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : str , __lowerCamelCase : bool = False ) -> Any:
self.text_queue.put(__lowerCamelCase , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self : int ) -> Tuple:
return self
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict:
A : int = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
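# Typical usage of the queue-backed iterator variant defined above (it mirrors
# transformers' TextIteratorStreamer): run generation on a background thread and
# consume decoded text as it arrives. `model` and `tokenizer` are assumed to be
# a loaded causal LM and its tokenizer.
from threading import Thread

def stream_generation(model, tokenizer, prompt, max_new_tokens=32):
    from transformers import TextIteratorStreamer

    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
    inputs = tokenizer([prompt], return_tensors="pt")
    thread = Thread(
        target=model.generate,
        kwargs=dict(**inputs, streamer=streamer, max_new_tokens=max_new_tokens),
    )
    thread.start()
    for new_text in streamer:  # blocks on the internal queue until tokens arrive
        print(new_text, flush=True, end="")
    thread.join()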
| 256
| 1
|
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {'''vocab_file''': '''spiece.model'''}
a__ = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
}
}
a__ = {
'''google/bigbird-roberta-base''': 4096,
'''google/bigbird-roberta-large''': 4096,
'''google/bigbird-base-trivia-itc''': 4096,
}
class UpperCAmelCase_ ( __a ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = VOCAB_FILES_NAMES
UpperCAmelCase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : Union[str, Any] = ['''input_ids''', '''attention_mask''']
UpperCAmelCase__ : List[int] = []
def __init__( self , _a , _a="<unk>" , _a="<s>" , _a="</s>" , _a="<pad>" , _a="[SEP]" , _a="[MASK]" , _a="[CLS]" , _a = None , **_a , ) -> List[str]:
_a : Any = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else bos_token
_a : Tuple = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else eos_token
_a : Optional[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else unk_token
_a : Dict = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else pad_token
_a : List[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else cls_token
_a : Tuple = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else sep_token
        # Mask token behaves like a normal word, i.e. includes the space before it
_a : List[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
_a : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase__ , )
_a : Tuple = vocab_file
_a : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase__ )
@property
def __lowercase ( self ) -> Union[str, Any]:
return self.sp_model.get_piece_size()
def __lowercase ( self ) -> int:
_a : List[str] = {self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> str:
_a : Union[str, Any] = self.__dict__.copy()
_a : Dict = None
return state
def __setstate__( self , _a ) -> Any:
_a : str = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_a : str = {}
_a : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowercase ( self , _a ) -> Union[str, Any]:
return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__ )
def __lowercase ( self , _a ) -> int:
return self.sp_model.piece_to_id(lowerCAmelCase__ )
def __lowercase ( self , _a ) -> Tuple:
_a : Union[str, Any] = self.sp_model.IdToPiece(lowerCAmelCase__ )
return token
def __lowercase ( self , _a ) -> str:
_a : Optional[Any] = []
_a : str = ''''''
_a : Optional[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase__ ) + token
_a : Union[str, Any] = True
_a : Optional[Any] = []
else:
current_sub_tokens.append(lowerCAmelCase__ )
_a : List[str] = False
out_string += self.sp_model.decode(lowerCAmelCase__ )
return out_string.strip()
def __lowercase ( self , _a , _a = False , _a = None , _a = True , **_a , ) -> str:
_a : Tuple = kwargs.pop('''use_source_tokenizer''' , lowerCAmelCase__ )
_a : int = self.convert_ids_to_tokens(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_a : List[Any] = []
_a : Optional[int] = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase__ ) )
_a : Dict = []
sub_texts.append(lowerCAmelCase__ )
else:
current_sub_text.append(lowerCAmelCase__ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase__ ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
_a : int = re.sub(R''' (\[(MASK|SEP)\])''' , R'''\1''' , ''' '''.join(lowerCAmelCase__ ) )
else:
_a : Dict = ''''''.join(lowerCAmelCase__ )
_a : Any = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_a : Union[str, Any] = self.clean_up_tokenization(lowerCAmelCase__ )
return clean_text
else:
return text
def __lowercase ( self , _a , _a = None ) -> Union[str, Any]:
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_a : Optional[int] = os.path.join(
lowerCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase__ , '''wb''' ) as fi:
_a : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__ )
return (out_vocab_file,)
def __lowercase ( self , _a , _a = None ) -> List[str]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_a : Optional[Any] = [self.cls_token_id]
_a : int = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def __lowercase ( self , _a , _a = None , _a = False ) -> Any:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1] + ([0] * len(lowerCAmelCase__ )) + [1]
def __lowercase ( self , _a , _a = None ) -> Optional[Any]:
_a : str = [self.sep_token_id]
_a : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
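# The tokenizer above delegates to a SentencePiece model; a minimal sketch of the
# underlying round trip, mirroring the calls used in the class (assumes a trained
# SentencePiece model file on disk):
import sentencepiece as spm

def sentencepiece_roundtrip(model_path, text):
    sp = spm.SentencePieceProcessor()
    sp.Load(model_path)
    pieces = sp.encode(text, out_type=str)             # text -> subword pieces
    ids = [sp.piece_to_id(piece) for piece in pieces]  # pieces -> vocabulary ids
    assert [sp.IdToPiece(i) for i in ids] == pieces
    return sp.decode(pieces), ids                      # pieces -> text, plus the ids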
| 235
|
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
"huggingface/informer-tourism-monthly": (
"https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
__lowercase : Dict = '''informer'''
__lowercase : Union[str, Any] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = "student_t" , lowerCAmelCase__ = "nll" , lowerCAmelCase__ = 1 , lowerCAmelCase__ = None , lowerCAmelCase__ = "mean" , lowerCAmelCase__ = 0 , lowerCAmelCase__ = 0 , lowerCAmelCase__ = 0 , lowerCAmelCase__ = 0 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = 6_4 , lowerCAmelCase__ = 3_2 , lowerCAmelCase__ = 3_2 , lowerCAmelCase__ = 2 , lowerCAmelCase__ = 2 , lowerCAmelCase__ = 2 , lowerCAmelCase__ = 2 , lowerCAmelCase__ = True , lowerCAmelCase__ = "gelu" , lowerCAmelCase__ = 0.05 , lowerCAmelCase__ = 0.1 , lowerCAmelCase__ = 0.1 , lowerCAmelCase__ = 0.1 , lowerCAmelCase__ = 0.1 , lowerCAmelCase__ = 1_0_0 , lowerCAmelCase__ = 0.02 , lowerCAmelCase__=True , lowerCAmelCase__ = "prob" , lowerCAmelCase__ = 5 , lowerCAmelCase__ = True , **lowerCAmelCase__ , ):
# time series specific configuration
__SCREAMING_SNAKE_CASE = prediction_length
__SCREAMING_SNAKE_CASE = context_length or prediction_length
__SCREAMING_SNAKE_CASE = distribution_output
__SCREAMING_SNAKE_CASE = loss
__SCREAMING_SNAKE_CASE = input_size
__SCREAMING_SNAKE_CASE = num_time_features
__SCREAMING_SNAKE_CASE = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
__SCREAMING_SNAKE_CASE = scaling
__SCREAMING_SNAKE_CASE = num_dynamic_real_features
__SCREAMING_SNAKE_CASE = num_static_real_features
__SCREAMING_SNAKE_CASE = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(lowerCAmelCase__) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""")
__SCREAMING_SNAKE_CASE = cardinality
else:
__SCREAMING_SNAKE_CASE = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(lowerCAmelCase__) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""")
__SCREAMING_SNAKE_CASE = embedding_dimension
else:
__SCREAMING_SNAKE_CASE = [min(5_0 , (cat + 1) // 2) for cat in self.cardinality]
__SCREAMING_SNAKE_CASE = num_parallel_samples
# Transformer architecture configuration
__SCREAMING_SNAKE_CASE = input_size * len(self.lags_sequence) + self._number_of_features
__SCREAMING_SNAKE_CASE = d_model
__SCREAMING_SNAKE_CASE = encoder_attention_heads
__SCREAMING_SNAKE_CASE = decoder_attention_heads
__SCREAMING_SNAKE_CASE = encoder_ffn_dim
__SCREAMING_SNAKE_CASE = decoder_ffn_dim
__SCREAMING_SNAKE_CASE = encoder_layers
__SCREAMING_SNAKE_CASE = decoder_layers
__SCREAMING_SNAKE_CASE = dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = encoder_layerdrop
__SCREAMING_SNAKE_CASE = decoder_layerdrop
__SCREAMING_SNAKE_CASE = activation_function
__SCREAMING_SNAKE_CASE = init_std
__SCREAMING_SNAKE_CASE = use_cache
# Informer
__SCREAMING_SNAKE_CASE = attention_type
__SCREAMING_SNAKE_CASE = sampling_factor
__SCREAMING_SNAKE_CASE = distil
super().__init__(is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__)
@property
def snake_case_ ( self):
return (
sum(self.embedding_dimension)
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
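# A worked example of the feature arithmetic above. With input_size=1, the seven
# default lags, two time features, no static or dynamic extra features, and the
# two scaling features (log1p(abs(loc)) and log(scale)):
input_size = 1
lags_sequence = [1, 2, 3, 4, 5, 6, 7]
num_time_features = 2
num_dynamic_real_features = 0
num_static_real_features = 0
embedding_dimension = []  # no static categorical features

number_of_features = (
    sum(embedding_dimension)
    + num_dynamic_real_features
    + num_time_features
    + num_static_real_features
    + input_size * 2
)
feature_size = input_size * len(lags_sequence) + number_of_features
assert (number_of_features, feature_size) == (4, 11)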
| 100
| 0
|
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowerCAmelCase = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
__lowerCAmelCase = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
__lowerCAmelCase = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n    predictions (list of str): list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references (list of list of str): list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n    \'google_bleu\': google_bleu score\n\nExamples:\n    Example 1:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...          \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...          \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results["google_bleu"], 2))\n        0.44\n\n    Example 2:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...          \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...          \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...          \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...          \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results["google_bleu"], 2))\n        0.61\n\n    Example 3:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...          \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...          \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...          \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...          \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n        >>> print(round(results["google_bleu"], 2))\n        0.53\n\n    Example 4:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...          \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...          \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...          \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...          \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n        >>> print(round(results["google_bleu"], 2))\n        0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
def lowercase (self ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , )
def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 1 , UpperCAmelCase = 4 , ) -> Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=UpperCAmelCase , hypotheses=UpperCAmelCase , min_len=UpperCAmelCase , max_len=UpperCAmelCase )
}
| 270
|
'''simple docstring'''
from math import sqrt
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
_snake_case = 0
for i in range(1 , int(sqrt(_SCREAMING_SNAKE_CASE ) + 1 ) ):
if n % i == 0 and i != sqrt(_SCREAMING_SNAKE_CASE ):
total += i + n // i
elif i == sqrt(_SCREAMING_SNAKE_CASE ):
total += i
return total - n
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE = 1_0000 ):
_snake_case = sum(
i
for i in range(1 , _SCREAMING_SNAKE_CASE )
if sum_of_divisors(sum_of_divisors(_SCREAMING_SNAKE_CASE ) ) == i and sum_of_divisors(_SCREAMING_SNAKE_CASE ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
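# A quick check of the divisor-sum helper above on the classic amicable pair
# (220, 284): the proper divisors of each sum to the other, so each survives
# the double-application test in the solution while not being perfect. An O(n)
# reference version is enough for the check:
def proper_divisor_sum(n):
    return sum(i for i in range(1, n) if n % i == 0)

assert proper_divisor_sum(220) == 284
assert proper_divisor_sum(284) == 220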
| 270
| 1
|
"""simple docstring"""
def UpperCamelCase_ ( lowerCAmelCase__ : str ) -> List[str]:
"""simple docstring"""
lowerCAmelCase_ : List[Any] = [False] * len(lowerCAmelCase__ )
lowerCAmelCase_ : str = [-1] * len(lowerCAmelCase__ )
def dfs(lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[str] ):
lowerCAmelCase_ : Union[str, Any] = True
lowerCAmelCase_ : List[str] = c
for u in graph[v]:
if not visited[u]:
dfs(lowerCAmelCase__ , 1 - c )
for i in range(len(lowerCAmelCase__ ) ):
if not visited[i]:
dfs(lowerCAmelCase__ , 0 )
for i in range(len(lowerCAmelCase__ ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
lowercase__ : Optional[int] = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
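# Two more checks of the two-colouring test above, reusing the function name
# from the call on the previous line: a triangle (odd cycle) cannot be
# two-coloured, while a 4-cycle can.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False
square = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}
print(check_bipartite_dfs(square))  # True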
| 224
|
"""simple docstring"""
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
while b:
lowerCAmelCase , lowerCAmelCase = b, a % b
return a
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
return a if b == 0 else euclidean_gcd_recursive(SCREAMING_SNAKE_CASE , a % b )
def UpperCAmelCase__ ( ):
'''simple docstring'''
print(F'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(F'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(F'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(F'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(F'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(F'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(F'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(F'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(F'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(F'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
if __name__ == "__main__":
main()
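# A short trace of the iterative form above for gcd(48, 18):
#   (a, b) = (48, 18) -> (18, 48 % 18 = 12) -> (12, 18 % 12 = 6) -> (6, 12 % 6 = 0)
# so the answer is 6, matching the standard library:
import math

assert math.gcd(48, 18) == 6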
| 46
| 0
|
"""simple docstring"""
import string
from math import logaa
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
lowerCAmelCase__ : int = document.translate(
str.maketrans('''''' , '''''' , string.punctuation ) ).replace('''\n''' , '''''' )
lowerCAmelCase__ : str = document_without_punctuation.split(''' ''' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
lowerCAmelCase__ : List[str] = corpus.lower().translate(
str.maketrans('''''' , '''''' , string.punctuation ) ) # strip all punctuation and replace it with ''
lowerCAmelCase__ : List[Any] = corpus_without_punctuation.split('''\n''' )
lowerCAmelCase__ : Optional[Any] = term.lower()
return (len([doc for doc in docs if term in doc] ), len(A_ ))
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_=False ):
if smoothing:
if n == 0:
raise ValueError('''log10(0) is undefined.''' )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('''df must be > 0''' )
elif n == 0:
raise ValueError('''log10(0) is undefined.''' )
return round(logaa(n / df ) , 3 )
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
return round(tf * idf , 3 )
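# A worked example of the three helpers above (term frequency, inverse document
# frequency, and their product), following the rounding used in the code:
#   tf : "cat" occurs twice in "The cat sat. The cat ran." -> 2
#   idf: 10 documents, "cat" appears in 2 -> round(log10(10 / 2), 3) = 0.699
#   tf-idf = round(2 * 0.699, 3) = 1.398
from math import log10

assert round(log10(10 / 2), 3) == 0.699
assert round(2 * 0.699, 3) == 1.398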
| 361
|
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[Any] ,lowercase_ : List[str] ,lowercase_ : int=1_3 ,lowercase_ : Optional[int]=3_0 ,lowercase_ : int=2 ,lowercase_ : List[Any]=3 ,lowercase_ : str=True ,lowercase_ : int=True ,lowercase_ : str=3_2 ,lowercase_ : Optional[int]=5 ,lowercase_ : Optional[Any]=4 ,lowercase_ : Any=3_7 ,lowercase_ : str="gelu" ,lowercase_ : Any=0.1 ,lowercase_ : List[Any]=0.1 ,lowercase_ : int=1_0 ,lowercase_ : str=0.02 ,):
lowerCAmelCase__ : Optional[int] = parent
lowerCAmelCase__ : int = batch_size
lowerCAmelCase__ : str = image_size
lowerCAmelCase__ : Dict = patch_size
lowerCAmelCase__ : Dict = num_channels
lowerCAmelCase__ : Union[str, Any] = is_training
lowerCAmelCase__ : Optional[int] = use_labels
lowerCAmelCase__ : List[Any] = hidden_size
lowerCAmelCase__ : Dict = num_hidden_layers
lowerCAmelCase__ : int = num_attention_heads
lowerCAmelCase__ : Any = intermediate_size
lowerCAmelCase__ : List[Any] = hidden_act
lowerCAmelCase__ : Optional[int] = hidden_dropout_prob
lowerCAmelCase__ : List[str] = attention_probs_dropout_prob
lowerCAmelCase__ : Any = type_sequence_label_size
lowerCAmelCase__ : Optional[int] = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase__ : int = (image_size // patch_size) ** 2
lowerCAmelCase__ : Dict = num_patches + 1
def __lowerCAmelCase ( self : List[str] ):
lowerCAmelCase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ : List[Any] = ViTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=lowercase_ ,initializer_range=self.initializer_range ,)
return config, pixel_values
def __lowerCAmelCase ( self : Tuple ,lowercase_ : List[Any] ,lowercase_ : Optional[int] ):
lowerCAmelCase__ : Optional[Any] = FlaxViTModel(config=lowercase_ )
lowerCAmelCase__ : Dict = model(lowercase_ )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase__ : int = (self.image_size, self.image_size)
lowerCAmelCase__ : int = (self.patch_size, self.patch_size)
lowerCAmelCase__ : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, num_patches + 1, self.hidden_size) )
def __lowerCAmelCase ( self : int ,lowercase_ : List[Any] ,lowercase_ : List[str] ):
lowerCAmelCase__ : Optional[int] = self.type_sequence_label_size
lowerCAmelCase__ : Any = FlaxViTForImageClassification(config=lowercase_ )
lowerCAmelCase__ : Any = model(lowercase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase__ : List[Any] = 1
lowerCAmelCase__ : Tuple = FlaxViTForImageClassification(lowercase_ )
lowerCAmelCase__ : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ : str = model(lowercase_ )
def __lowerCAmelCase ( self : Union[str, Any] ):
lowerCAmelCase__ : Any = self.prepare_config_and_inputs()
(
(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,
) : Any = config_and_inputs
lowerCAmelCase__ : List[str] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class SCREAMING_SNAKE_CASE ( a_ , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
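
# A self-contained sketch of the patch-count arithmetic checked above (the 224/16
# values below are illustrative assumptions, not taken from this test): a ViT
# splits an H x W image into (H // P) * (W // P) patches and prepends one [CLS]
# token, so the transformer sees num_patches + 1 positions.
def _demo_vit_sequence_length(image_size: int = 224, patch_size: int = 16) -> int:
    num_patches = (image_size // patch_size) * (image_size // patch_size)
    return num_patches + 1  # 197 for the assumed 224/16 setup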
| 74
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"

    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
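
# Usage sketch for the config above (illustrative values, not model defaults):
# `attribute_map` lets the generic names resolve to the CodeGen-specific
# attributes, e.g. `hidden_size` reads `n_embd` and `num_hidden_layers` reads
# `n_layer`.
def _demo_attribute_map() -> None:
    config = CodeGenConfig(n_embd=1024, n_layer=20)
    assert config.hidden_size == 1024 and config.num_hidden_layers == 20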
| 141
|
'''simple docstring'''
import datasets
_CITATION = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
_DESCRIPTION = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
_KWARGS_DESCRIPTION = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def simple_accuracy(preds, labels):
    '''simple docstring'''
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 141
| 1
|
'''simple docstring'''

import random


def partition(a, left_index, right_index):
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main():
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    quick_sort_random(unsorted, 0, len(unsorted))
    print(unsorted)


if __name__ == "__main__":
    main()
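
# A deterministic self-check of the in-place sort above (example data assumed):
# quick_sort_random sorts the half-open index range [left, right).
def _demo_quick_sort_random():
    example = [5, 1, 4, 2, 3]
    quick_sort_random(example, 0, len(example))
    assert example == [1, 2, 3, 4, 5]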
| 37
|
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( snake_case__ ,unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = PegasusTokenizer
SCREAMING_SNAKE_CASE = PegasusTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
return PegasusTokenizer.from_pretrained("google/pegasus-large" )
def lowerCamelCase_ ( self : List[Any] , **UpperCAmelCase_ : List[str] ):
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowerCamelCase_ ( self : str , UpperCAmelCase_ : int ):
"""simple docstring"""
return ("This is a test", "This is a test")
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
__UpperCAmelCase : List[str] = "</s>"
__UpperCAmelCase : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
__UpperCAmelCase : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "</s>" )
self.assertEqual(vocab_keys[-1] , "v" )
self.assertEqual(len(UpperCAmelCase_ ) , 1_103 )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_103 )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
__UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCAmelCase : int = self.tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCAmelCase : Tuple = (
"Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
" </s> <pad> <pad> <pad>"
)
__UpperCAmelCase : List[str] = rust_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ).input_ids[0]
__UpperCAmelCase : int = py_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ).input_ids[0]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
__UpperCAmelCase : Any = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
__UpperCAmelCase : Tuple = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
__UpperCAmelCase : Optional[Any] = [2, 413, 615, 114, 3, 1_971, 113, 1_679, 10_710, 107, 1]
__UpperCAmelCase : Optional[Any] = tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ ).input_ids[0]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
__UpperCAmelCase : Dict = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96_103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_024
__UpperCAmelCase : Tuple = "To ensure a smooth flow of bank resolutions."
__UpperCAmelCase : str = [413, 615, 114, 2_291, 1_971, 113, 1_679, 10_710, 107, 1]
__UpperCAmelCase : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ ).input_ids[0]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = ["This is going to be way too long." * 150, "short example"]
__UpperCAmelCase : Optional[int] = ["not super long but more than 5 tokens", "tiny"]
__UpperCAmelCase : str = self._large_tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors="pt" )
__UpperCAmelCase : Union[str, Any] = self._large_tokenizer(
text_target=UpperCAmelCase_ , max_length=5 , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors="pt" )
assert batch.input_ids.shape == (2, 1_024)
assert batch.attention_mask.shape == (2, 1_024)
assert targets["input_ids"].shape == (2, 5)
assert len(UpperCAmelCase_ ) == 2 # input_ids, attention_mask.
@slow
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
# fmt: off
__UpperCAmelCase : Tuple = {"input_ids": [[38_979, 143, 18_485, 606, 130, 26_669, 87_686, 121, 54_189, 1_129, 111, 26_669, 87_686, 121, 9_114, 14_787, 121, 13_249, 158, 592, 956, 121, 14_621, 31_576, 143, 62_613, 108, 9_688, 930, 43_430, 11_562, 62_613, 304, 108, 11_443, 897, 108, 9_314, 17_415, 63_399, 108, 11_443, 7_614, 18_316, 118, 4_284, 7_148, 12_430, 143, 1_400, 25_703, 158, 111, 4_284, 7_148, 11_772, 143, 21_297, 1_064, 158, 122, 204, 3_506, 1_754, 1_133, 14_787, 1_581, 115, 33_224, 4_482, 111, 1_355, 110, 29_173, 317, 50_833, 108, 20_147, 94_665, 111, 77_198, 107, 1], [110, 62_613, 117, 638, 112, 1_133, 121, 20_098, 1_355, 79_050, 13_872, 135, 1_596, 53_541, 1_352, 141, 13_039, 5_542, 124, 302, 518, 111, 268, 2_956, 115, 149, 4_427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1_235, 2_799, 18_289, 17_780, 204, 109, 9_474, 1_296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name="google/bigbird-pegasus-large-arxiv" , revision="ba85d0851d708441f91440d509690f1ab6353415" , )
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( snake_case__ ,unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = PegasusTokenizer
SCREAMING_SNAKE_CASE = PegasusTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv" )
def lowerCamelCase_ ( self : Union[str, Any] , **UpperCAmelCase_ : int ):
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowerCamelCase_ ( self : str , UpperCAmelCase_ : Union[str, Any] ):
"""simple docstring"""
return ("This is a test", "This is a test")
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCAmelCase : List[str] = (
"Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
" <pad> <pad> <pad>"
)
__UpperCAmelCase : str = rust_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ).input_ids[0]
__UpperCAmelCase : int = py_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ).input_ids[0]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@require_torch
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
__UpperCAmelCase : Any = ["This is going to be way too long." * 1_000, "short example"]
__UpperCAmelCase : List[Any] = ["not super long but more than 5 tokens", "tiny"]
__UpperCAmelCase : int = self._large_tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors="pt" )
__UpperCAmelCase : List[Any] = self._large_tokenizer(
text_target=UpperCAmelCase_ , max_length=5 , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors="pt" )
assert batch.input_ids.shape == (2, 4_096)
assert batch.attention_mask.shape == (2, 4_096)
assert targets["input_ids"].shape == (2, 5)
assert len(UpperCAmelCase_ ) == 2 # input_ids, attention_mask.
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = (
"This is an example string that is used to test the original TF implementation against the HF"
" implementation"
)
__UpperCAmelCase : int = self._large_tokenizer(UpperCAmelCase_ ).input_ids
self.assertListEqual(
UpperCAmelCase_ , [182, 117, 142, 587, 4_211, 120, 117, 263, 112, 804, 109, 856, 25_016, 3_137, 464, 109, 26_955, 3_137, 1] , )
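
# Sketch of the id layout the assertions above encode (offset value taken from
# those assertions; the helper name is hypothetical): Pegasus reserves ids 0/1
# for <pad>/</s> plus a block of special tokens, so every raw SentencePiece id
# is shifted up by `offset` before use.
def _pegasus_id_from_sentencepiece_id(sp_id: int, offset: int = 103) -> int:
    return sp_id + offset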
| 37
| 1
|
'''simple docstring'''
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    features: Optional[datasets.Features] = None
def UpperCamelCase_( snake_case : "pyspark.sql.DataFrame" , snake_case : List[int] , ):
'''simple docstring'''
import pyspark
def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
for row in rows:
yield f'{partition_id}_{row_id}', row.asDict()
row_id += 1
return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df, partition_order=None):
        '''simple docstring'''
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)
def __iter__( self ) -> Union[str, Any]:
'''simple docstring'''
yield from self.generate_examples_fn()
def lowerCAmelCase__ ( self , a__ ) -> "SparkExamplesIterable":
'''simple docstring'''
snake_case_ = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(a__ )
return SparkExamplesIterable(self.df , partition_order=a__ )
def lowerCAmelCase__ ( self , a__ , a__ ) -> "SparkExamplesIterable":
'''simple docstring'''
snake_case_ = self.split_shard_indices_by_worker(a__ , a__ )
return SparkExamplesIterable(self.df , partition_order=a__ )
@property
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
return len(self.partition_order )
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(self, df, cache_dir=None, working_dir=None, **config_kwargs):
        '''simple docstring'''
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )
    def _validate_cache_dir(self):
        '''simple docstring'''
def create_cache_and_write_probe(a__ ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]
if self._spark.conf.get("spark.master" , "" ).startswith("local" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
if os.path.isfile(probe[0] ):
return
raise ValueError(
"When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" )
    def _info(self):
        '''simple docstring'''
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        '''simple docstring'''
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]

    def _repartition_df_if_needed(self, max_shard_size):
        '''simple docstring'''
        import pyspark
def get_arrow_batch_size(a__ ):
for batch in it:
yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} )
        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(self, fpath, file_format, max_shard_size) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
        '''simple docstring'''
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def lowerCAmelCase__ ( self , a__ , a__ = "arrow" , a__ = None , a__ = None , **a__ , ) -> int:
'''simple docstring'''
self._validate_cache_dir()
        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)
        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)
        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes
        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths
            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(task_id, shard_id, global_shard_id):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )
    def _get_examples_iterable_for_split(self, split_generator) -> SparkExamplesIterable:
        '''simple docstring'''
        return SparkExamplesIterable(self.df)
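
# A pure-Python sketch of the shard-naming scheme used above: paths carry a
# "-TTTTT-SSSSS-of-NNNNN" placeholder that is filled with the zero-padded task
# id, shard id, and total shard count (example values assumed).
def _demo_shard_name() -> str:
    fpath = "builder-train-TTTTT-SSSSS-of-NNNNN.arrow"
    task_id, shard_id, total_shards = 7, 3, 12
    name = fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}")
    return name.replace("NNNNN", f"{total_shards:05d}")  # 'builder-train-00007-00003-of-00012.arrow'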
| 85
|
'''simple docstring'''

from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
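
# Offline sketch of the markdown line format used above (story dict assumed;
# the live functions need network access to the Hacker News API):
def _demo_markdown_line() -> str:
    story = {"title": "Example story", "url": "https://example.com"}
    return "* [{title}]({url})".format(**story)  # '* [Example story](https://example.com)'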
| 85
| 1
|
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    """simple docstring"""
    texts = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, oder?''',
}
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
'''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
'''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
'''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
'''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
}
lowerCamelCase__ : Union[str, Any] = f"{src_lang}-{tgt_lang}"
lowerCamelCase__ : Any = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = os.path.join(UpperCAmelCase , '''README.md''' )
print(f"Generating {path}" )
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(UpperCAmelCase )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 359
|
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from=None, standard_warn=True, stacklevel=2):
    """simple docstring"""
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
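
# Usage sketch for the helper above (names are illustrative, not from this file):
# deprecating a keyword argument pops it from the supplied kwargs dict, emits a
# FutureWarning, and returns the popped value.
#
#   def step(self, **kwargs):
#       scale = deprecate("scale", "1.0.0", "Use `sigma` instead.", take_from=kwargs)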
| 265
| 0
|
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    '''simple docstring'''
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError('The parameter days should be a list of integers')
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError('The parameter costs should be a list of three integers')
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError('All days elements should be greater than 0')
    if max(days) >= 366:
        raise ValueError('All days elements should be less than 366')
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
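
# Worked example for the DP above (inputs assumed, matching the classic
# "minimum cost for tickets" sample): a 7-day pass bought on day 1 covers
# days 1-7 (cost 7), then two single-day tickets cover days 8 and 20 (2 + 2).
def _demo_mincost_tickets() -> int:
    return mincost_tickets(days=[1, 4, 6, 7, 8, 20], costs=[2, 7, 15])  # -> 11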
| 333
|
def generate_large_matrix() -> list[list[int]]:
    '''simple docstring'''
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    '''simple docstring'''
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    '''simple docstring'''
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    '''simple docstring'''
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    '''simple docstring'''
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    '''simple docstring'''
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    '''simple docstring'''
    from timeit import timeit

    print('Running benchmarks')
    setup = (
        'from __main__ import count_negatives_binary_search, '
        'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f'{func}(grid=grid)', setup=setup, number=500)
        print(f'{func}() took {time:0.4f} seconds')
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
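
# A quick cross-check of the three counters above on the first small test grid
# (grid literal repeated here so the example is self-contained):
def _demo_count_negatives() -> int:
    small = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
    count = count_negatives_binary_search(small)
    assert count == count_negatives_brute_force(small) == count_negatives_brute_force_with_break(small) == 8
    return count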
| 333
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
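
# Sketch of the channel arithmetic in the config above: each Swin stage doubles
# the embedding dimension, so with the defaults (embed_dim=96, 4 stages) the
# final hidden size is 96 * 2**(4 - 1) = 768.
def _demo_swin_hidden_size(embed_dim: int = 96, num_stages: int = 4) -> int:
    return int(embed_dim * 2 ** (num_stages - 1))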
| 304
|
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    '''simple docstring'''
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
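
# A deterministic check of the merge above (input tuples assumed): construction
# sorts each list, and merging simply re-sorts the concatenation.
def _demo_merge_lists() -> str:
    merged = merge_lists(SortedLinkedList((3, 1, 2)), SortedLinkedList((0, 4)))
    return str(merged)  # '0 -> 1 -> 2 -> 3 -> 4'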
| 304
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
'''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwinForImageClassification''',
'''SwinForMaskedImageModeling''',
'''SwinModel''',
'''SwinPreTrainedModel''',
'''SwinBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
'''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSwinForImageClassification''',
'''TFSwinForMaskedImageModeling''',
'''TFSwinModel''',
'''TFSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 278
|
import logging
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments

logger = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    """simple docstring"""

    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
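
# Usage sketch (assumes the standard HfArgumentParser flow used by the legacy
# seq2seq examples; not shown in this file): the dataclass above is parsed
# straight from the command line.
#
#   from transformers import HfArgumentParser
#   parser = HfArgumentParser(Seq2SeqTrainingArguments)
#   (training_args,) = parser.parse_args_into_dataclasses()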
| 178
| 0
|
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    '''simple docstring'''

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 357
|
'''simple docstring'''


def jaccard_similarity(set_a, set_b, alternative_union=False):
    """simple docstring"""
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None


if __name__ == "__main__":
    set_a = {'a', 'b', 'c', 'd', 'e'}
    set_b = {'c', 'd', 'e', 'f', 'h', 'i'}
    print(jaccard_similarity(set_a, set_b))
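
# Worked example for the set branch above: the demo sets share {c, d, e}
# (3 elements) out of 8 distinct elements overall, so the score is 3 / 8.
def _demo_jaccard() -> float:
    return jaccard_similarity({"a", "b", "c", "d", "e"}, {"c", "d", "e", "f", "h", "i"})  # 0.375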
| 275
| 0
|
'''simple docstring'''
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    """simple docstring"""
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    """simple docstring"""
    with open(dataset_path, encoding='utf_8') as f:
        reader = csv.reader(f)
        output = []
        next(reader)  # skip the first line
        for line in tqdm(reader):
            output.append((' '.join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """simple docstring"""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', type=str, default='openai-gpt', help='pretrained model name')
    parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
    parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
    parser.add_argument(
        '--output_dir',
        default=None,
        type=str,
        required=True,
        help='The output directory where the model predictions and checkpoints will be written.',
    )
    parser.add_argument('--train_dataset', type=str, default='')
    parser.add_argument('--eval_dataset', type=str, default='')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--num_train_epochs', type=int, default=3)
    parser.add_argument('--train_batch_size', type=int, default=8)
    parser.add_argument('--eval_batch_size', type=int, default=16)
    parser.add_argument('--adam_epsilon', default=1e-8, type=float, help='Epsilon for Adam optimizer.')
    parser.add_argument('--max_grad_norm', type=int, default=1)
    parser.add_argument(
        '--max_steps',
        default=-1,
        type=int,
        help='If > 0: set total number of training steps to perform. Override num_train_epochs.',
    )
    parser.add_argument(
        '--gradient_accumulation_steps',
        type=int,
        default=1,
        help='Number of updates steps to accumulate before performing a backward/update pass.',
    )
    parser.add_argument('--learning_rate', type=float, default=6.25e-5)
    parser.add_argument('--warmup_steps', default=0, type=int, help='Linear warmup over warmup_steps.')
    parser.add_argument('--lr_schedule', type=str, default='warmup_linear')
    parser.add_argument('--weight_decay', type=float, default=0.01)
    parser.add_argument('--lm_coef', type=float, default=0.9)
    parser.add_argument('--n_valid', type=int, default=374)
    parser.add_argument('--server_ip', type=str, default='', help='Can be used for distant debugging.')
    parser.add_argument('--server_port', type=str, default='', help='Can be used for distant debugging.')
    args = parser.parse_args()
    print(args)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    n_gpu = torch.cuda.device_count()
    logger.info('device: {}, n_gpu {}'.format(device, n_gpu))
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ['_start_', '_delimiter_', '_classify_']
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)
# Load and encode the datasets
    def tokenize_and_encode(obj):
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
logger.info('Encoding dataset...' )
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Prepare optimizer
if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
        param_optimizer = list(model.named_parameters())
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {
                'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                'weight_decay': args.weight_decay,
            },
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc='Epoch'):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc='Training')
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = 'Training loss: {:.2e} lr: {:.2e}'.format(exp_average_loss, scheduler.get_lr()[0])
# Save a trained model
if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself
        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)
        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)
if args.do_eval:
model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc='Evaluating'):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to('cpu').numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1
        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
        output_eval_file = os.path.join(args.output_dir, 'eval_results.txt')
        with open(output_eval_file, 'w') as writer:
            logger.info('***** Eval results *****')
            for key in sorted(result.keys()):
                logger.info('  %s = %s', key, str(result[key]))
                writer.write('%s = %s\n' % (key, str(result[key])))
if __name__ == "__main__":
main()
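# A minimal invocation sketch (the script name and data paths are hypothetical;
# the flag names match the argparse definitions and `args.*` usages above):
#   python run_openai_gpt.py \
#     --model_name openai-gpt \
#     --do_train --do_eval \
#     --train_dataset data/rocstories_train.csv \
#     --eval_dataset data/rocstories_eval.csv \
#     --output_dir out/rocstories \
#     --num_train_epochs 3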
| 166
|
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split('-')
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == 'gated'
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                '\'gated-gelu\' or \'relu\'')
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = 'gelu_new'

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeqaSeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            'input_ids': {0: 'batch', 1: 'encoder_sequence'},
            'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
        }
        if self.use_past:
            common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence'
            common_inputs['decoder_input_ids'] = {0: 'batch'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
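# Usage sketch (comments only, since the relative imports above keep this module
# from running standalone):
#   config = UMT5Config(d_model=256, num_layers=4)
#   assert config.hidden_size == 256 and config.num_hidden_layers == 4
#   assert config.is_gated_act and config.dense_act_fn == "gelu_new"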
| 166
| 1
|
"""simple docstring"""
Vectorad = tuple[float, float, float]
Pointad = tuple[float, float, float]


def create_vector(end_pointa: Pointad, end_pointb: Pointad) -> Vectorad:
    """Create a vector pointing from the first end point to the second."""
    x = end_pointb[0] - end_pointa[0]
    y = end_pointb[1] - end_pointa[1]
    z = end_pointb[2] - end_pointa[2]
    return (x, y, z)


def get_ad_vectors_cross(ab: Vectorad, ac: Vectorad) -> Vectorad:
    """3D cross product of two vectors."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vectorad, accuracy: int) -> bool:
    """Check if a vector is the zero vector, rounding to `accuracy` decimal places."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(point_a: Pointad, point_b: Pointad, point_c: Pointad, accuracy: int = 10) -> bool:
    """Three points are collinear iff the cross product of AB and AC is the zero vector."""
    ab = create_vector(point_a, point_b)
    ac = create_vector(point_a, point_c)
    return is_zero_vector(get_ad_vectors_cross(ab, ac), accuracy)
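# Quick sanity-check sketch for the helpers above:
if __name__ == "__main__":
    assert are_collinear((0.0, 0.0, 0.0), (1.0, 1.0, 1.0), (2.0, 2.0, 2.0))
    assert not are_collinear((0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0))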
| 239
|
"""simple docstring"""
import math
import unittest
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class PrimeCheckTest(unittest.TestCase):
    def test_primes(self) -> None:
"""simple docstring"""
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
    def test_not_primes(self) -> None:
"""simple docstring"""
        with self.assertRaises(AssertionError):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) ,'''Zero doesn\'t have any positive factors, primes must have exactly two.''' ,)
self.assertFalse(
is_prime(1 ) ,'''One only has 1 positive factor, primes must have exactly two.''' ,)
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 239
| 1
|
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Sum of the proper divisors of n."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10000) -> int:
    """Sum of all amicable numbers below n (Project Euler 21)."""
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i)
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
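# Sanity check: 220 and 284 are the classic amicable pair, so
# sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220.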
| 107
|
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """This was added to get rougeLsum scores matching published rougeL scores for BART and PEGASUS."""
    re.sub("""<n>""", """""", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
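# Usage sketch (assumes the punkt tokenizer data was downloaded above):
#   add_newline_to_end_of_each_sentence("First sentence. Second one.")
#   -> "First sentence.\nSecond one."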
| 229
| 0
|
"""simple docstring"""
import os
def solution(filename: str = "input.txt") -> int:
    """Minimal path sum moving right, up, or down through the matrix (Project Euler 82)."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(',')]
            for line in input_file.readlines()
        ]
    rows = len(matrix)
    cols = len(matrix[0])
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j])
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j])
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 205
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'ChineseCLIPImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class
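# Usage sketch (comments only because of the relative imports; the checkpoint id
# below is the public Chinese-CLIP base model and is an assumption here):
#   from transformers import ChineseCLIPProcessor
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text=["一张猫的照片"], images=image, return_tensors="pt")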
| 205
| 1
|
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image) -> str:
    m = hashlib.md5(image.tobytes())  # md5 digest of the raw image bytes
    return m.hexdigest()[:10]


def mask_to_test_readable(image) -> Dict:
    npimg = np.array(image)
    shape = npimg.shape
    return {"hash": hashimage(image), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else []))
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []))
    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass
@require_tf
@unittest.skip("Image segmentation not implemented in TF" )
    def test_small_model_tf(self):
'''simple docstring'''
pass
@slow
@require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
        # Shortening by hashing
        new_outupt = []
        for i, o in enumerate(outputs["masks"]):
            new_outupt += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_outupt, decimals=4), [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_21},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
{"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.99_67},
{"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.9_93},
{"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.99_09},
{"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.98_79},
{"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.98_34},
{"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.97_16},
{"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.96_12},
{"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.95_99},
{"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.95_52},
{"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.95_32},
{"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.95_16},
{"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.94_99},
{"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.94_83},
{"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.94_64},
{"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.9_43},
{"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.9_43},
{"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.94_08},
{"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.93_35},
{"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.93_26},
{"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.92_62},
{"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.89_99},
{"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.89_86},
{"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.89_84},
{"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.88_73},
{"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.88_71}
] , )
# fmt: on
@require_torch
@slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256)
        # Shortening by hashing
        new_outupt = []
        for i, o in enumerate(outputs["masks"]):
            new_outupt += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_outupt, decimals=4), [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.02_10},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
] , )
| 52
|
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False
    def setUp(self):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w') as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, 'w') as fp:
            fp.write('\n'.join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer(self):
        """Test the full BPE tokenizer against the toy vocabulary built in setUp."""
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)
        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_a = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
| 146
| 0
|
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = 'bart'
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased')
        qar_model = AutoModel.from_pretrained('yjernite/retribert-base-uncased').to('cuda:0')
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained('yjernite/bart_eli5')
        sas_model = AutoModelForSeq2SeqLM.from_pretrained('yjernite/bart_eli5').to('cuda:0')
        save_dict = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth')
        sas_model.load_state_dict(save_dict['model'])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name='t5-small', from_file='seq2seq_models/eli5_t5_model_1024_4.pth', device='cuda:0')
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path='wiki_snippets', name='wiki40b_en_100_0')['train']
        wiki40b_passage_reps = np.memmap(
            'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat', dtype='float32', mode='r',
            shape=(wiki40b_passages.num_rows, 128), )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{'host': 'localhost', 'port': '9200'}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset('eli5', name='LFQA_reddit')
    eli5_train = eli5['train_eli5']
    eli5_train_q_reps = np.memmap(
        'eli5_questions_reps.dat', dtype='float32', mode='r', shape=(eli5_train.num_rows, 128))
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (' <P> '.join(['' for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results)
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name='english_wiki40b_snippets_100w', n_results=n_results, )
    support_list = [
        (res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
    ]
    question_doc = 'question: {} context: {}'.format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    })
def answer_question(
        question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95,
        temp=0.8):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc, sas_model, sas_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len,
            max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None, max_input_length=1024,
            device='cuda:0', )[0]
    return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = """
    ### Information retriever options

    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
    The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
    """
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = """
    ### Answer generation options

    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
    **beam** search, or **sample** from the decoder's output probabilities.
    """
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What\'s the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What\'s the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
            question_doc, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            question_doc, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
        answers_st = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
disclaimer = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 366
|
deps = {
"""Pillow""": """Pillow""",
"""accelerate""": """accelerate>=0.11.0""",
"""compel""": """compel==0.1.8""",
"""black""": """black~=23.1""",
"""datasets""": """datasets""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.13.2""",
"""requests-mock""": """requests-mock==1.10.0""",
"""importlib_metadata""": """importlib_metadata""",
"""invisible-watermark""": """invisible-watermark""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2""",
"""jaxlib""": """jaxlib>=0.1.65""",
"""Jinja2""": """Jinja2""",
"""k-diffusion""": """k-diffusion>=0.0.12""",
"""torchsde""": """torchsde""",
"""note_seq""": """note_seq""",
"""librosa""": """librosa""",
"""numpy""": """numpy""",
"""omegaconf""": """omegaconf""",
"""parameterized""": """parameterized""",
"""protobuf""": """protobuf>=3.20.3,<4""",
"""pytest""": """pytest""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""ruff""": """ruff>=0.0.241""",
"""safetensors""": """safetensors""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""scipy""": """scipy""",
"""onnx""": """onnx""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""tensorboard""": """tensorboard""",
"""torch""": """torch>=1.4""",
"""torchvision""": """torchvision""",
"""transformers""": """transformers>=4.25.1""",
"""urllib3""": """urllib3<=2.0.0""",
}
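# A sketch of how such a pin table is typically consumed by packaging helpers
# (`deps_list` is a hypothetical helper here, not part of this file):
# def deps_list(*pkgs):
#     return [deps[pkg] for pkg in pkgs]
# install_requires = deps_list("torch", "transformers", "numpy")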
| 88
| 0
|
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """All left- and right-truncations of n, including n itself."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    """Cheap pre-filter: the leading and trailing three digits must be prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    """Find the first `count` two-sided truncatable primes."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    return sum(compute_truncated_primes(11))
if __name__ == "__main__":
print(f"""{sum(compute_truncated_primes(1_1)) = }""")
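# The eleven truncatable primes are 23, 37, 53, 73, 313, 317, 373, 797, 3137,
# 3797 and 739397, so the printed sum is 748317 (Project Euler 37).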
| 68
|
"""simple docstring"""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''')


@total_ordering
@dataclass
class Version:
    """Dataset version identifier of the form MAJOR.MINOR.PATCH."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f'{other} (type {type(other)}) cannot be compared to version.')

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the (major, minor, patch) tuple extracted from the version string."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.')
    return tuple(int(v) for v in [res.group('major'), res.group('minor'), res.group('patch')])


def _version_tuple_to_str(version_tuple):
    """Version tuple to str."""
    return ".".join(str(v) for v in version_tuple)
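# Usage sketch for the class above:
#   Version("1.0.0") == "1.0.0"          -> True (strings are coerced in _validate_operand)
#   Version("1.0.0") < Version("2.0.0")  -> True (@total_ordering derives <=, >, >= from __lt__/__eq__)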
| 74
| 0
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class DetaConfig(PretrainedConfig):
    model_type = """deta"""
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
            backbone_config = CONFIG_MAPPING["resnet"](out_features=['stage2', 'stage3', 'stage4'])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError('If two_stage is True, with_box_refine must be True.')
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize this instance to a dict, including the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
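# Usage sketch (comments only; the relative imports above prevent standalone execution):
#   config = DetaConfig()  # defaults to a ResNet backbone config
#   assert config.hidden_size == config.d_model == 256
#   assert config.num_attention_heads == config.encoder_attention_heads == 8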
| 370
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 251
| 0
|
'''simple docstring'''
import functools
def mincost_tickets(days, costs):
    """Minimum total cost of 1-, 7- and 30-day passes covering every travel day."""
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("""The parameter days should be a list of integers""")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("""The parameter costs should be a list of three integers""")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("""All days elements should be greater than 0""")

    if max(days) >= 366:
        raise ValueError("""All days elements should be less than 366""")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
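# Worked example (LeetCode 983): for travel days [1, 4, 6, 7, 8, 20] and costs
# [2, 7, 15] (1-, 7- and 30-day passes), a 7-day pass covering days 1-7 plus
# 1-day passes for days 8 and 20 is optimal:
#   mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11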
| 37
|
'''simple docstring'''
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(""" """, """""")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Benchmark code comparing the different versions."""
    from timeit import timeit

    setup = """from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"""
    print(timeit("""is_pangram()""", setup=setup))
    print(timeit("""is_pangram_faster()""", setup=setup))
    print(timeit("""is_pangram_fastest()""", setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
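# e.g. is_pangram() -> True for the default sentence, while
# is_pangram("hello world") -> False (most letters never appear).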
| 37
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''bert''', choices=['''bert'''])
parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = '''bert'''
    else:
        raise ValueError('''args.model_type should be "bert".''')

    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"""distilbert.embeddings.{w}.weight"""] = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
    for w in ["weight", "bias"]:
        compressed_sd[f"""distilbert.embeddings.LayerNorm.{w}"""] = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        # Map the selected teacher layers onto DistilBERT's module names
        for w in ["weight", "bias"]:
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
            ]
            compressed_sd[f"""distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"""] = state_dict[
                f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict['''cls.predictions.decoder.weight''']
    compressed_sd["vocab_projector.bias"] = state_dict['''cls.predictions.bias''']
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"""vocab_transform.{w}"""] = state_dict[f"""cls.predictions.transform.dense.{w}"""]
            compressed_sd[f"""vocab_layer_norm.{w}"""] = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 370
|
"""simple docstring"""
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace('''moe_layer.experts.0''', f'''ffn.experts.expert_{expert_idx}''')
            else:
                key = key.replace('''moe_layer.experts.''', '''ffn.experts.expert_''')
        if "gate" in key:
            key = key.replace('''.moe_layer.gate.wg''', '''.ffn.router.classifier''')
        if "fc2" and "experts" not in key:
            key = key.replace('''.fc2.''', '''.ffn.fc2.''')
        if "fc1" and "experts" not in key:
            key = key.replace('''.fc1.''', '''.ffn.fc1.''')
        if ".encoder_attn." in key:
            key = key.replace('''.encoder_attn.''', '''.cross_attention.''')
        if "encoder_attn_layer_norm" in key:
            key = key.replace('''encoder_attn_layer_norm''', '''cross_attention_layer_norm''')
        if "final_layer_norm" in key:
            key = key.replace('''final_layer_norm''', '''ff_layer_norm''')
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly ( switch_checkpoint_path , dump_path , num_experts , dtype , weights_name : str = WEIGHTS_NAME ):
sharded_state_dicts = []
total_size = 0
os.makedirs(dump_path , exist_ok=True )
for expert in range(num_experts ):
expert_path = switch_checkpoint_path + f'''-rank-{expert}.pt'''
if os.path.isfile(expert_path ):
expert_state = torch.load(expert_path )['''model''']
remove_ignore_keys_(expert_state )
expert_state = rename_fairseq_keys(expert_state , expert )
save_path = os.path.join(
dump_path , weights_name.replace('''.bin''' , f'''-{len(sharded_state_dicts )+1:05d}-of-???.bin''' ) )
torch.save(expert_state , save_path )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(expert_state )[0]].dtype )
# Add the last block
save_path = os.path.join(dump_path , weights_name.replace('''.bin''' , f'''-{len(sharded_state_dicts )+1:05d}-of-???.bin''' ) )
shared_weights = torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model''']
remove_ignore_keys_(shared_weights )
shared_weights = rename_fairseq_keys(shared_weights , None )
shared_weights['''shared.weight'''] = shared_weights['''decoder.embed_tokens.weight''']
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(sharded_state_dicts ) == 1:
save_path = os.path.join(dump_path , weights_name )
torch.save(shared_weights , save_path )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(shared_weights , save_path )
# Otherwise, let's build the index
weight_map = {}
for idx, shard in enumerate(sharded_state_dicts ):
shard_file = weights_name.replace('''.bin''' , f'''-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin''' )
temp_filename = os.path.join(dump_path , weights_name.replace('''.bin''' , f'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
for key in shard:
weight_map[key] = shard_file
# Add the metadata
metadata = {'''total_size''': total_size}
index = {'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , '''w''' , encoding='''utf-8''' ) as f:
content = json.dumps(index , indent=2 , sort_keys=True ) + '''\n'''
f.write(content )
return metadata, index
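# Sketch of the index written above (illustrative key and size; the real file
# lists every parameter of every shard):
# {"metadata": {"total_size": 216000000000},
#  "weight_map": {"decoder.layers.0.ffn.experts.expert_0.fc1.weight": "pytorch_model-00001-of-00129.bin"}}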
if __name__ == "__main__":
snake_case__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--nllb_moe_checkpoint_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
snake_case__ : List[str] = parser.parse_args()
metadata, index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
config = NllbMoeConfig.from_pretrained(
'''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('''Done''')
model.save_pretrained(args.pytorch_dump_folder_path)
| 314
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_electra_fast'] = ['ElectraTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_electra'] = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_electra'] = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_flax_electra'] = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 11
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class CamembertConfig ( PretrainedConfig ):
'''simple docstring'''
model_type = """camembert"""
def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
'''simple docstring'''
super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig ( OnnxConfig ):
'''simple docstring'''
@property
def inputs ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
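# A minimal usage sketch (illustrative; assumes the de-obfuscated names above):
# config = CamembertConfig()
# onnx_config = CamembertOnnxConfig(config, task="sequence-classification")
# print(onnx_config.inputs)  # OrderedDict mapping input names to dynamic axes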
| 209
| 0
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__A = logging.get_logger(__name__)
class BeitFeatureExtractor ( BeitImageProcessor ):
'''simple docstring'''
def __init__(self : Tuple , *args : Tuple , **kwargs : Any) ->None:
'''simple docstring'''
warnings.warn(
"The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use BeitImageProcessor instead." , FutureWarning , )
super().__init__(*args , **kwargs)
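# Migration sketch (illustrative arguments): a former call such as
# BeitFeatureExtractor(do_resize=True) becomes BeitImageProcessor(do_resize=True);
# all constructor arguments are forwarded unchanged by the shim above.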
| 273
|
from __future__ import annotations
from typing import Any
class Matrix :
'''simple docstring'''
def __init__(self : Tuple , row : int , column : int , default_value : float = 0) ->None:
'''simple docstring'''
self.row , self.column = row, column
self.array = [[default_value for c in range(column)] for r in range(row)]
def __str__(self : Tuple) ->str:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =F"""Matrix consist of {self.row} rows and {self.column} columns\n"""
# Make string identifier
lowerCamelCase__: List[str] =0
for row_vector in self.array:
for obj in row_vector:
lowerCamelCase__: int =max(UpperCAmelCase_ , len(str(UpperCAmelCase_)))
lowerCamelCase__: Any =F"""%{max_element_length}s"""
# Make string and return
def single_line(UpperCAmelCase_ : list[float]) -> str:
nonlocal string_format_identifier
lowerCamelCase__: Tuple ="["
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
line += "]"
return line
s += "\n".join(single_line(UpperCAmelCase_) for row_vector in self.array)
return s
def __repr__(self : Optional[int]) ->str:
'''simple docstring'''
return str(self)
def validate_indicies (self : Optional[int] , loc : tuple[int, int]) ->bool:
'''simple docstring'''
if not (isinstance(loc , (list, tuple)) and len(loc) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__(self : int , loc : tuple[int, int]) ->Any:
'''simple docstring'''
assert self.validate_indicies(loc)
return self.array[loc[0]][loc[1]]
def __setitem__(self : Optional[Any] , loc : tuple[int, int] , value : float) ->None:
'''simple docstring'''
assert self.validate_indicies(loc)
self.array[loc[0]][loc[1]] = value
def __add__(self : Dict , another : Matrix) ->Matrix:
'''simple docstring'''
assert isinstance(another , Matrix)
assert self.row == another.row and self.column == another.column
# Add
result = Matrix(self.row , self.column)
for r in range(self.row):
for c in range(self.column):
result[r, c] = self[r, c] + another[r, c]
return result
def __neg__(self : str) ->Matrix:
'''simple docstring'''
result = Matrix(self.row , self.column)
for r in range(self.row):
for c in range(self.column):
result[r, c] = -self[r, c]
return result
def __sub__(self : str , another : Matrix) ->Matrix:
'''simple docstring'''
return self + (-another)
def __mul__(self : List[str] , another : int | float | Matrix) ->Matrix:
'''simple docstring'''
if isinstance(another , (int, float)): # Scalar multiplication
result = Matrix(self.row , self.column)
for r in range(self.row):
for c in range(self.column):
result[r, c] = self[r, c] * another
return result
elif isinstance(another , Matrix): # Matrix multiplication
assert self.column == another.row
result = Matrix(self.row , another.column)
for r in range(self.row):
for c in range(another.column):
for i in range(self.column):
result[r, c] += self[r, i] * another[i, c]
return result
else:
msg = F"""Unsupported type given for another ({type(another)})"""
raise TypeError(msg)
def transpose (self : Optional[Any]) ->Matrix:
'''simple docstring'''
result = Matrix(self.column , self.row)
for r in range(self.row):
for c in range(self.column):
result[c, r] = self[r, c]
return result
def sherman_morrison (self : Optional[Any] , u : Matrix , v : Matrix) ->Any:
'''simple docstring'''
assert isinstance(u , Matrix) and isinstance(v , Matrix)
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
v_t = v.transpose()
numerator_factor = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertable
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
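# Note: `self` plays the role of A^(-1) here, so the method evaluates the
# Sherman-Morrison identity
# (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u).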
# Testing
if __name__ == "__main__":
def test1( ) -> None:
"""simple docstring"""
ainv = Matrix(3 , 3 , 0 )
for i in range(3 ):
ainv[i, i] = 1
print(F"""a^(-1) is {ainv}""" )
# u, v
u = Matrix(3 , 1 , 0 )
u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
v = Matrix(3 , 1 , 0 )
v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
print(F"""u is {u}""" )
print(F"""v is {v}""" )
print(F"""uv^T is {u * v.transpose()}""" )
# Sherman Morrison
print(F"""(a + uv^T)^(-1) is {ainv.sherman_morrison(u , v )}""" )
def test2( ) -> None:
"""simple docstring"""
import doctest
doctest.testmod()
test1()
| 273
| 1
|
'''simple docstring'''
def is_sum_subset ( arr : list[int] , required_sum : int ) -> bool:
arr_len = len(arr )
subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
subset[i][0] = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
subset[0][i] = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
subset[i][j] = subset[i - 1][j]
if arr[i - 1] <= j:
subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
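# Example (illustrative): is_sum_subset([2, 4, 6, 8], 5) is False (all values are
# even), while is_sum_subset([2, 4, 6, 8], 14) is True since 2 + 4 + 8 = 14.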
if __name__ == "__main__":
import doctest
doctest.testmod()
| 304
|
'''simple docstring'''
from __future__ import annotations
def merge ( input_list : list , low : int , mid : int , high : int ) -> list:
result = []
left , right = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
input_list[low : high + 1] = result + left + right
return input_list
def iter_merge_sort ( input_list : list ) -> list:
if len(input_list ) <= 1:
return input_list
input_list = list(input_list )
# iteration for two-way merging
p = 2
while p <= len(input_list ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 , len(input_list ) , p ):
low = i
high = i + p - 1
mid = (low + high + 1) // 2
input_list = merge(input_list , low , mid , high )
# final merge of last two parts
if p * 2 >= len(input_list ):
mid = i
input_list = merge(input_list , 0 , mid , len(input_list ) - 1 )
break
p *= 2
return input_list
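# Example (illustrative): iter_merge_sort([5, 9, 8, 7, 1, 2, 7]) returns
# [1, 2, 5, 7, 7, 8, 9]; the pass width p doubles each sweep until a single
# merge covers the whole list.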
if __name__ == "__main__":
user_input = input('Enter numbers separated by a comma:\n').strip()
if user_input == "":
unsorted = []
else:
unsorted = [int(item.strip()) for item in user_input.split(',')]
print(iter_merge_sort(unsorted))
| 304
| 1
|
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links ( workflow_run_id : Optional[Any] , token : List[str]=None ) -> int:
"""simple docstring"""
headers = None
if token is not None:
headers = {'Accept': 'application/vnd.github+json', 'Authorization': f"Bearer {token}"}
url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
result = requests.get(url , headers=headers ).json()
job_links = {}
try:
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
pages_to_iterate_over = math.ceil((result['total_count'] - 1_00) / 1_00 )
for i in range(pages_to_iterate_over ):
result = requests.get(url + f"&page={i + 2}" , headers=headers ).json()
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
return job_links
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def get_artifacts_links ( worflow_run_id : Optional[int] , token : Dict=None ) -> Any:
"""simple docstring"""
headers = None
if token is not None:
headers = {'Accept': 'application/vnd.github+json', 'Authorization': f"Bearer {token}"}
url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
result = requests.get(url , headers=headers ).json()
artifacts = {}
try:
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
pages_to_iterate_over = math.ceil((result['total_count'] - 1_00) / 1_00 )
for i in range(pages_to_iterate_over ):
result = requests.get(url + f"&page={i + 2}" , headers=headers ).json()
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
return artifacts
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def download_artifact ( artifact_name : List[str] , artifact_url : Optional[int] , output_dir : List[str] , token : Optional[int] ) -> List[str]:
"""simple docstring"""
headers = None
if token is not None:
headers = {'Accept': 'application/vnd.github+json', 'Authorization': f"Bearer {token}"}
result = requests.get(artifact_url , headers=headers , allow_redirects=False )
download_url = result.headers['Location']
response = requests.get(download_url , allow_redirects=True )
file_path = os.path.join(output_dir , f"{artifact_name}.zip" )
with open(file_path , 'wb' ) as fp:
fp.write(response.content )
def get_errors_from_single_artifact ( artifact_zip_path : List[Any] , job_links : Optional[Any]=None ) -> str:
"""simple docstring"""
errors = []
failed_tests = []
job_name = None
with zipfile.ZipFile(artifact_zip_path ) as z:
for filename in z.namelist():
if not os.path.isdir(filename ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(filename ) as f:
for line in f:
line = line.decode('UTF-8' ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
error_line = line[: line.index(': ' )]
error = line[line.index(': ' ) + len(': ' ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith('FAILED ' ):
# `test` is the test method that failed
test = line[len('FAILED ' ) :]
failed_tests.append(test )
elif filename == "job_name.txt":
job_name = line
if len(errors ) != len(failed_tests ):
raise ValueError(
f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors )} for `errors` "
f"and {len(failed_tests )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
' problem.' )
job_link = None
if job_name and job_links:
job_link = job_links.get(job_name , None )
# A list with elements of the form (line of error, error, failed test)
result = [x + [y] + [job_link] for x, y in zip(errors , failed_tests )]
return result
def get_all_errors ( artifact_dir : List[str] , job_links : str=None ) -> int:
"""simple docstring"""
errors = []
paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if p.endswith('.zip' )]
for p in paths:
errors.extend(get_errors_from_single_artifact(p , job_links=job_links ) )
return errors
def reduce_by_error ( logs : List[Any] , error_filter : Any=None ) -> Any:
"""simple docstring"""
counter = Counter()
counter.update([x[1] for x in logs] )
counts = counter.most_common()
r = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
r[error] = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]}
r = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def get_model ( test : str ) -> Any:
"""simple docstring"""
test = test.split('::' )[0]
if test.startswith('tests/models/' ):
model = test.split('/' )[2]
else:
model = None
return model
def reduce_by_model ( logs : Optional[int] , error_filter : str=None ) -> str:
"""simple docstring"""
logs = [(x[0], x[1], get_model(x[2] )) for x in logs]
logs = [x for x in logs if x[2] is not None]
tests = {x[2] for x in logs}
r = {}
for test in tests:
counter = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
counts = counter.most_common()
error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
n_errors = sum(error_counts.values() )
if n_errors > 0:
r[test] = {'count': n_errors, 'errors': error_counts}
r = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
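# Illustrative result shape (hypothetical numbers):
# {"bert": {"count": 7, "errors": {"RuntimeError: CUDA out of memory": 7}}}
# sorted so the model with the most failures comes first.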
def make_github_table ( reduced_by_error : Any ) -> Union[str, Any]:
"""simple docstring"""
header = '| no. | error | status |'
sep = '|-:|:-|:-|'
lines = [header, sep]
for error in reduced_by_error:
count = reduced_by_error[error]['count']
line = f"| {count} | {error[:1_00]} | |"
lines.append(line )
return "\n".join(lines )
def make_github_table_per_model ( reduced_by_model : str ) -> Union[str, Any]:
"""simple docstring"""
header = '| model | no. of errors | major error | count |'
sep = '|-:|-:|-:|-:|'
lines = [header, sep]
for model in reduced_by_model:
count = reduced_by_model[model]['count']
error , _count = list(reduced_by_model[model]['errors'].items() )[0]
line = f"| {model} | {count} | {error[:60]} | {_count} |"
lines.append(line )
return "\n".join(lines )
if __name__ == "__main__":
lowerCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
lowerCamelCase : List[str] = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_job_links = get_job_links(args.workflow_run_id, token=args.token)
job_links = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
lowerCamelCase : int = k.find(" / ")
lowerCamelCase : List[Any] = k[index + len(" / ") :]
lowerCamelCase : Optional[Any] = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
errors = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
counter = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
most_common = counter.most_common(3_0)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
reduced_by_error = reduce_by_error(errors)
reduced_by_model = reduce_by_model(errors)
sa = make_github_table(reduced_by_error)
sa_per_model = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa_per_model)
| 355
|
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
lowerCamelCase : List[str] = {"UserAgent": UserAgent().random}
def extract_user_profile ( script ) -> dict:
"""simple docstring"""
data = script.contents[0]
info = json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser :
def __init__( self : int , username : str ) -> None:
'''simple docstring'''
self.url = f"https://www.instagram.com/{username}/"
self.user_data = self.get_json()
def get_json( self : Optional[int] ) -> dict:
'''simple docstring'''
html = requests.get(self.url , headers=headers ).text
scripts = BeautifulSoup(html , 'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : str ) -> str:
'''simple docstring'''
return f"{self.__class__.__name__}('{self.username}')"
def __str__( self : Optional[int] ) -> str:
'''simple docstring'''
return f"{self.fullname} ({self.username}) is {self.biography}"
@property
def username( self : List[Any] ) -> str:
'''simple docstring'''
return self.user_data["username"]
@property
def fullname( self : str ) -> str:
'''simple docstring'''
return self.user_data["full_name"]
@property
def biography( self : Any ) -> str:
'''simple docstring'''
return self.user_data["biography"]
@property
def email( self : Optional[Any] ) -> str:
'''simple docstring'''
return self.user_data["business_email"]
@property
def website( self : Tuple ) -> str:
'''simple docstring'''
return self.user_data["external_url"]
@property
def number_of_followers( self : Optional[int] ) -> int:
'''simple docstring'''
return self.user_data["edge_followed_by"]["count"]
@property
def number_of_followings( self : Union[str, Any] ) -> int:
'''simple docstring'''
return self.user_data["edge_follow"]["count"]
@property
def number_of_posts( self : List[str] ) -> int:
'''simple docstring'''
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def profile_picture_url( self : Union[str, Any] ) -> str:
'''simple docstring'''
return self.user_data["profile_pic_url_hd"]
@property
def is_verified( self : Dict ) -> bool:
'''simple docstring'''
return self.user_data["is_verified"]
@property
def is_private( self : Tuple ) -> bool:
'''simple docstring'''
return self.user_data["is_private"]
def _lowerCAmelCase ( _UpperCamelCase : str = "github" ) -> None:
"""simple docstring"""
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
_SCREAMING_SNAKE_CASE =InstagramUser(_UpperCamelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , _UpperCamelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_50
assert instagram_user.number_of_followers > 12_00_00
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Optional[int] = InstagramUser("github")
print(instagram_user)
print(f'''{instagram_user.number_of_posts = }''')
print(f'''{instagram_user.number_of_followers = }''')
print(f'''{instagram_user.number_of_followings = }''')
print(f'''{instagram_user.email = }''')
print(f'''{instagram_user.website = }''')
print(f'''{instagram_user.profile_picture_url = }''')
print(f'''{instagram_user.is_verified = }''')
print(f'''{instagram_user.is_private = }''')
| 114
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_chinese_clip': [
'CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ChineseCLIPConfig',
'ChineseCLIPOnnxConfig',
'ChineseCLIPTextConfig',
'ChineseCLIPVisionConfig',
],
'processing_chinese_clip': ['ChineseCLIPProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['feature_extraction_chinese_clip'] = ['ChineseCLIPFeatureExtractor']
_import_structure['image_processing_chinese_clip'] = ['ChineseCLIPImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_chinese_clip'] = [
'CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ChineseCLIPModel',
'ChineseCLIPPreTrainedModel',
'ChineseCLIPTextModel',
'ChineseCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
__a :Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 312
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
SCREAMING_SNAKE_CASE__ = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
SCREAMING_SNAKE_CASE__ = {
"allenai/led-base-16384": 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode ( ) -> int:
"""simple docstring"""
bs = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
cs = bs[:]
n = 0
for b in range(2**8 ):
if b not in bs:
bs.append(b )
cs.append(2**8 + n )
n += 1
cs = [chr(n ) for n in cs]
return dict(zip(bs , cs ) )
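# Example: bytes_to_unicode()[ord(" ")] == "Ġ"; every raw byte is mapped to a
# printable unicode stand-in so whitespace and control bytes stay visible in
# BPE merge files.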
def get_pairs ( word : tuple ) -> Union[str, Any]:
"""simple docstring"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
prev_char = char
return pairs
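# Example: get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}.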
class LEDTokenizer ( PreTrainedTokenizer ):
"""simple docstring"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
"""simple docstring"""
bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
super().__init__(
errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
with open(vocab_file , encoding='utf-8' ) as vocab_handle:
self.encoder = json.load(vocab_handle )
self.decoder = {v: k for k, v in self.encoder.items()}
self.errors = errors # how to handle errors in decoding
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
with open(merges_file , encoding='utf-8' ) as merges_handle:
bpe_merges = merges_handle.read().split('\n' )[1:-1]
bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
self.cache = {}
self.add_prefix_space = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
self.pat = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def snake_case ( self ):
"""simple docstring"""
return len(self.encoder )
def snake_case ( self ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def bpe ( self , token ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
word = tuple(token )
pairs = get_pairs(word )
if not pairs:
return token
while True:
bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
first , second = bigram
new_word = []
i = 0
while i < len(word ):
try:
j = word.index(first , i )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
i = j
if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
new_word = tuple(new_word )
word = new_word
if len(word ) == 1:
break
else:
pairs = get_pairs(word )
word = ' '.join(word )
self.cache[token] = word
return word
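# Illustrative walk-through (hypothetical merge table): with bpe_ranks
# {("l", "o"): 0, ("lo", "w"): 1}, bpe("low") first merges ("l", "o") into "lo",
# then ("lo", "w") into "low", caches the result and returns "low".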
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
bpe_tokens = []
for token in re.findall(self.pat , lowerCAmelCase ):
token = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(' ' ) )
return bpe_tokens
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
return self.encoder.get(lowerCAmelCase , self.encoder.get(self.unk_token ) )
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
return self.decoder.get(lowerCAmelCase )
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
text = ''.join(lowerCAmelCase )
text = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def save_vocabulary ( self , save_directory , filename_prefix = None ):
"""simple docstring"""
if not os.path.isdir(save_directory ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
vocab_file = os.path.join(
save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
merge_file = os.path.join(
save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(vocab_file , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '\n' )
index = 0
with open(merge_file , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
index = token_index
writer.write(' '.join(bpe_tokens ) + '\n' )
index += 1
return vocab_file, merge_file
def build_inputs_with_special_tokens ( self , token_ids_a , token_ids_b = None ):
"""simple docstring"""
if token_ids_b is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_b + sep
def get_special_tokens_mask ( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
if token_ids_b is None:
return [1] + ([0] * len(token_ids_a )) + [1]
return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b )) + [1]
def create_token_type_ids_from_sequences ( self , token_ids_a , token_ids_b = None ):
"""simple docstring"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_b is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
def prepare_for_tokenization ( self , text , is_split_into_words=False , **kwargs ):
"""simple docstring"""
add_prefix_space = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
text = ' ' + text
return (text, kwargs)
def _pad ( self , encoded_inputs , max_length = None , padding_strategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of = None , return_attention_mask = None , ):
"""simple docstring"""
encoded_inputs = super()._pad(
encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
# Load from model defaults
if return_attention_mask is None:
return_attention_mask = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
required_input = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
needs_to_be_padded = len(encoded_inputs['global_attention_mask'] ) != len(required_input )
if needs_to_be_padded:
difference = len(required_input ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
encoded_inputs['global_attention_mask'] = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
| 150
| 0
|
def different_signs ( num1 : int , num2 : int ) -> bool:
return num1 ^ num2 < 0
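# Example: different_signs(1, -1) is True and different_signs(1, 1) is False;
# the XOR of two integers is negative exactly when their sign bits differ.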
if __name__ == "__main__":
import doctest
doctest.testmod()
| 189
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def create_rename_keys ( config : Optional[Any] , base_model : int=False ) -> Optional[Any]:
rename_keys = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
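# Example of one mapping produced above:
# ("blocks.0.norm1.weight", "vit.encoder.layer.0.layernorm_before.weight")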
def read_in_q_k_v ( state_dict : Dict , config : Union[str, Any] , base_model : List[str]=False ) -> int:
for i in range(config.num_hidden_layers ):
if base_model:
prefix = ""
else:
prefix = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
in_proj_weight = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
in_proj_bias = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
: config.hidden_size, :
]
state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
-config.hidden_size :, :
]
state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
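# The fused timm qkv projection has shape (3 * hidden_size, hidden_size); the
# slices above split it row-wise into equal query, key and value blocks.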
def remove_classification_head_ ( state_dict : int ) -> Optional[int]:
ignore_keys = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(k , None )
def rename_key ( dct : Any , old : Optional[int] , new : Dict ) -> Tuple:
val = dct.pop(old )
dct[new] = val
def prepare_img ( ) -> List[Any]:
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
im = Image.open(requests.get(url , stream=True ).raw )
return im
@torch.no_grad()
def convert_vit_checkpoint ( vit_name : Tuple , pytorch_dump_folder_path : int , push_to_hub : List[Any]=False ) -> Any:
backbone_config = BitConfig(
global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=True , )
config = ViTHybridConfig(backbone_config=backbone_config , image_size=384 , num_labels=1000 )
base_model = False
# load original model from timm
timm_model = timm.create_model(vit_name , pretrained=True )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
state_dict = timm_model.state_dict()
if base_model:
remove_classification_head_(state_dict )
rename_keys = create_rename_keys(config , base_model )
for src, dest in rename_keys:
rename_key(state_dict , src , dest )
read_in_q_k_v(state_dict , config , base_model )
_UpperCAmelCase : str = "huggingface/label-files"
_UpperCAmelCase : Tuple = "imagenet-1k-id2label.json"
_UpperCAmelCase : Union[str, Any] = json.load(open(hf_hub_download(lowerCAmelCase , lowerCAmelCase , repo_type="dataset" ) , "r" ) )
_UpperCAmelCase : Any = {int(lowerCAmelCase ): v for k, v in idalabel.items()}
_UpperCAmelCase : Dict = idalabel
_UpperCAmelCase : Dict = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
_UpperCAmelCase : Union[str, Any] = ViTHybridModel(lowerCAmelCase ).eval()
else:
_UpperCAmelCase : Optional[Any] = ViTHybridForImageClassification(lowerCAmelCase ).eval()
model.load_state_dict(lowerCAmelCase )
# create image processor
transform = create_transform(**resolve_data_config({} , model=timm_model ) )
timm_transforms = transform.transforms
pillow_resamplings = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
processor = ViTHybridImageProcessor(
do_resize=True , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
image = prepare_img()
timm_pixel_values = transform(image ).unsqueeze(0 )
pixel_values = processor(image , return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(timm_pixel_values , pixel_values )
# verify logits
with torch.no_grad():
outputs = model(pixel_values )
logits = outputs.logits
print("Predicted class:" , logits.argmax(-1 ).item() )
if base_model:
timm_pooled_output = timm_model.forward_features(pixel_values )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1E-3 )
else:
timm_logits = timm_model(pixel_values )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(F'Saving model {vit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(pytorch_dump_folder_path )
print(F'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print(F'Pushing model and processor to the hub {vit_name}' )
model.push_to_hub(F'ybelkada/{vit_name}' )
processor.push_to_hub(F'ybelkada/{vit_name}' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_r50_s16_384',
type=str,
help='Name of the hybrid ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 189
| 1
|
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
SCREAMING_SNAKE_CASE_ : Dict = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
SCREAMING_SNAKE_CASE_ : str = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
SCREAMING_SNAKE_CASE_ : List[str] = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
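# For reference, the transposition requirement above is easy to see by calling
# sacrebleu directly. A minimal, hedged sketch (assumes `sacrebleu>=1.4.12` is
# installed; the sentences are borrowed from the docstring above):
if __name__ == "__main__":
    from sacrebleu import CHRF as SacrebleuCHRF

    predictions = ["The relationship between cats and dogs is not exactly friendly."]
    references = [["The relationship between dogs and cats is not exactly friendly."]]
    # One reference list per prediction -> transpose into sacrebleu's
    # "one list per reference position" layout.
    transposed = [[refs[i] for refs in references] for i in range(len(references[0]))]
    print(SacrebleuCHRF(word_order=2).corpus_score(predictions, transposed))  # word_order=2 gives chrF++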
| 335
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
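# The same checkpoint can be exercised without the tool wrapper. A hedged
# sketch using the standard ViLT classes (the image path is a placeholder):
if __name__ == "__main__":
    from PIL import Image as PILImage
    from transformers import ViltForQuestionAnswering, ViltProcessor

    processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
    vqa_model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa")

    image = PILImage.open("example.jpg")  # placeholder path
    encoded = processor(image, "How many cats are there?", return_tensors="pt")
    with torch.no_grad():
        logits = vqa_model(**encoded).logits
    print(vqa_model.config.id2label[logits.argmax(-1).item()])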
| 335
| 1
|
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset
    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end_to_end_retrieve_with_ctx_encoder(self):
        ctx_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(ctx_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
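# The machinery under test boils down to nearest-neighbour search over a FAISS
# index attached to a `datasets.Dataset`. A minimal sketch of that core step
# (assumes `faiss` and `datasets` are installed; values mirror the dummy data above):
if __name__ == "__main__":
    dim = 8
    demo_dataset = Dataset.from_dict(
        {
            "text": ["foo", "bar"],
            "embeddings": [np.ones(dim), 2 * np.ones(dim)],
        }
    )
    demo_dataset.add_faiss_index(column="embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

    query = np.ones(dim, dtype=np.float32)
    scores, examples = demo_dataset.get_nearest_examples("embeddings", query, k=1)
    print(examples["text"])  # ['bar']: the inner product favours the larger embedding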
| 291
|
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
"""simple docstring"""
def __init__( self , **__lowerCamelCase ):
'''simple docstring'''
super().__init__(**__lowerCamelCase )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
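# In practice this pipeline is reached through `transformers.pipeline`. A
# hedged usage sketch (the model name and image URL are illustrative only):
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
    predictions = classifier(
        "http://images.cocodataset.org/val2017/000000039769.jpg",  # illustrative URL
        candidate_labels=["two cats", "a dog", "a plane"],
    )
    print(predictions)  # list of {"score": ..., "label": ...}, best match first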
| 291
| 1
|
'''simple docstring'''
def selection_sort(collection):
    """In-place selection sort: repeatedly swap the minimum of the unsorted
    suffix to the front of it."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[i], collection[least] = (collection[least], collection[i])
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(selection_sort(unsorted))
| 70
|
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)
    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
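# The `pipeline | "Load Examples" >> beam.Create(...)` pattern used by the
# builders above is ordinary Beam composition. A standalone sketch with the
# DirectRunner (assumes `apache-beam` is installed; data is illustrative):
if __name__ == "__main__":
    import apache_beam as beam

    with beam.Pipeline(runner="DirectRunner") as demo_pipeline:
        (
            demo_pipeline
            | "Load Examples" >> beam.Create([(0, {"content": "foo"}), (1, {"content": "bar"})])
            | "Keep payload" >> beam.Map(lambda example: example[1])
            | "Print" >> beam.Map(print)
        )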
| 189
| 0
|
"""simple docstring"""
def solution(n: int = 2_00_00_00) -> int:
    """Project Euler 10: sum of all primes below n, via a sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]  # 0 = assumed prime, 1 = composite
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(F"""{solution() = }""")
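# Quick sanity check against a naive primality test (the `is_prime` helper
# below is illustrative only, not part of the original solution):
if __name__ == "__main__":
    def is_prime(k: int) -> bool:
        return k >= 2 and all(k % d for d in range(2, int(k**0.5) + 1))

    assert solution(10) == sum(p for p in range(10) if is_prime(p)) == 17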
| 100
|
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/detr-resnet-50""": """https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json""",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=1_00,
        encoder_layers=6,
        encoder_ffn_dim=20_48,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=20_48,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=2_56,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, dilation, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
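# The `attribute_map` above lets generic code read transformer-style names
# while DETR keeps its own. A small hedged demonstration (requires `transformers`):
if __name__ == "__main__":
    config = DetrConfig(d_model=128, encoder_attention_heads=4)
    # attribute_map and the properties route the generic names to DETR's native ones:
    assert config.hidden_size == config.d_model == 128
    assert config.num_attention_heads == config.encoder_attention_heads == 4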
| 100
| 1
|
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
| 157
|
def sum_of_digits(n: int) -> int:
    """Iterative digit sum."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Recursive digit sum."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Digit sum via string conversion."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three digit-sum implementations with timeit."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
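# One quick property-based check: a digit sum is congruent to the number
# itself modulo 9, so all three variants can be cross-validated in one pass:
if __name__ == "__main__":
    for n in (0, 7, 26_2144, 9_8765_4321):
        s = sum_of_digits(n)
        assert s == sum_of_digits_recursion(n) == sum_of_digits_compact(n)
        assert s % 9 == n % 9  # digit sums preserve the residue mod 9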
| 157
| 1
|
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    """simple docstring"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos, **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
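# Typical entry point, as a hedged sketch (the checkpoint name and clip URL
# are illustrative; `decord` must be installed for video decoding):
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")
    results = classifier("https://example.com/clip.mp4", top_k=3)  # illustrative URL
    print(results)  # [{"score": ..., "label": ...}, ...]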
| 266
|
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no counterpart in the HF model."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build a bias-free linear layer that shares the embedding weights."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    """Map fairseq parameter names onto the NLLB-MoE naming scheme."""
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
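# The index file written above follows the standard sharded-checkpoint layout:
# a "metadata" block with the total byte size plus a "weight_map" from
# parameter name to shard file. A minimal sketch of that structure (the
# parameter names and sizes below are illustrative only):
if __name__ == "__main__":
    demo_index = {
        "metadata": {"total_size": 123456789},  # sum over all shards, in bytes
        "weight_map": {
            "encoder.layers.0.ffn.fc1.weight": "pytorch_model-00001-of-00002.bin",
            "decoder.layers.0.ffn.experts.expert_0.fc1.weight": "pytorch_model-00002-of-00002.bin",
        },
    }
    print(json.dumps(demo_index, indent=2, sort_keys=True))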
| 266
| 1
|
"""simple docstring"""
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    if n <= 0:
        print("       ...       ....        nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
print(r'''| /\ | |- | |- |--| |\ /| |-''')
print(r'''|/ \| |- |_ |_ |__| | \/ | |_''')
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
print('''Good Bye...''')
| 74
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=40_478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1E-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 251
| 0
|
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1E-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
    expected_text = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model
    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words
    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
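# Outside the test harness, the same checkpoint is usually driven through a
# summarization pipeline. A hedged usage sketch (the article is abbreviated
# from the test fixture above):
if __name__ == "__main__":
    from transformers import pipeline

    summarizer = pipeline("summarization", model="google/pegasus-xsum", framework="tf")
    article = "PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions."
    print(summarizer(article, num_beams=2)[0]["summary_text"])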
| 367
|
def find_minimum_change(denominations, value) -> list[int]:
    """Greedy change-making: repeatedly take the largest denomination that fits."""
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denomination
    for denomination in reversed(denominations):
        # Find denominations
        while total_value >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array

    return answer
# Driver Code
if __name__ == "__main__":
UpperCAmelCase = []
UpperCAmelCase = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
UpperCAmelCase = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(f'''Denomination {i}: ''').strip()))
UpperCAmelCase = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
UpperCAmelCase = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
UpperCAmelCase = input("""Enter the change you want to make: """).strip()
    if int(value) <= 0:
print("""The total value cannot be zero or negative.""")
else:
print(f'''Following is minimal change for {value}: ''')
UpperCAmelCase = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 267
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[8, 16, 32, 64], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], num_groups=1):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, out_features=self.out_features, out_indices=self.out_indices, num_groups=self.num_groups,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as Bit does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason="""Bit does not output attentions""" )
def a ( self : List[Any] ) -> Optional[Any]:
pass
@unittest.skip(reason="""Bit does not use inputs_embeds""" )
def a ( self : Optional[Any] ) -> List[str]:
pass
@unittest.skip(reason="""Bit does not support input and output embeddings""" )
def a ( self : List[Any] ) -> List[Any]:
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1), msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0), msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip(reason="""Bit does not use feedforward chunking""" )
def a ( self : str ) -> Any:
pass
def a ( self : Union[str, Any] ) -> Dict:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ )
@slow
def a ( self : List[Any] ) -> str:
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = BitModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
| 229
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=246_534, n_positions=256, n_embd=1280, dff=8192, n_layer=48, n_head=16, resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-6, initializer_range=0.02, use_cache=True, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
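# Hedged usage sketch (not part of the original file): `attribute_map` above
# aliases generic config names onto CTRL-specific ones at attribute-access time.
_cfg = CTRLConfig()
assert _cfg.hidden_size == _cfg.n_embd == 1280
assert _cfg.num_hidden_layers == _cfg.n_layer == 48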
| 229
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on


class MBartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", tokenizer_file=None, src_lang=None, tgt_lang=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, additional_special_tokens=None, **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, tokenizer_file=tokenizer_file, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset special tokens to the source-lang setting: no prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset special tokens to the target-lang setting: no prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
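# Illustrative sketch (toy numbers) of the fairseq/spm id alignment documented
# in __init__ above: plain spm piece ids shift up by fairseq_offset, while the
# four control tokens keep fixed fairseq positions.
fairseq_tokens_to_ids_example = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset_example = 1
spm_id_of_comma = 3  # "," sits at spm id 3 per the alignment table above
assert spm_id_of_comma + fairseq_offset_example == 4  # its fairseq id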
| 258
|
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}


class EncodecConfig(PretrainedConfig):
    model_type = "encodec"
    def __init__(self, target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0], sampling_rate=24_000, audio_channels=1, normalize=False, chunk_length_s=None, overlap=None, hidden_size=128, num_filters=32, num_residual_layers=1, upsampling_ratios=[8, 5, 4, 2], norm_type="weight_norm", kernel_size=7, last_kernel_size=7, residual_kernel_size=3, dilation_growth_rate=2, use_causal_conv=True, pad_mode="reflect", compress=2, num_lstm_layers=2, trim_right_ratio=1.0, codebook_size=1024, codebook_dim=None, use_conv_shortcut=True, **kwargs):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )
        super().__init__(**kwargs)
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
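# Worked example for the derived properties above, using the default
# sampling_rate=24_000 and upsampling_ratios=[8, 5, 4, 2] (values assumed):
# hop_length = 8 * 5 * 4 * 2 = 320, so frame_rate = ceil(24_000 / 320) = 75.
assert math.ceil(24_000 / (8 * 5 * 4 * 2)) == 75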
| 258
| 1
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    res = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, res).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
| 12
|
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
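# Standalone sketch of the key-renaming rule applied above (toy key assumed):
# weights stored under "roberta." are re-rooted under "roberta_prelayernorm.".
_key = "roberta.encoder.layer.0.attention.self.query.weight"
_renamed = "roberta_prelayernorm." + _key[len("roberta.") :]
assert _renamed == "roberta_prelayernorm.encoder.layer.0.attention.self.query.weight"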
| 74
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
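# Minimal sketch of the lazy-import pattern used above (simplified; the real
# _LazyModule also handles dir(), error modes and pickling): attribute access
# on the module object triggers the actual import.
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, item):
        for submodule, names in self._import_structure.items():
            if item in names:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, item)
        raise AttributeError(f"module {self.__name__!r} has no attribute {item!r}")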
| 356
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 170
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 308
|
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    """Wraps an MCTCT feature extractor and a tokenizer into a single processor."""

    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
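# Hypothetical usage sketch of the deprecated context manager above; the
# supported style passes `text=` to a single __call__ instead:
#
#     with processor.as_target_processor():
#         labels = processor(text_batch).input_ids
#
#     batch = processor(audio=speech, sampling_rate=16_000, text=text_batch)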
| 114
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}
class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=512 + 1, n_positions=32 * 32, n_embd=512, n_layer=24, n_head=8, n_inner=None, activation_function="quick_gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, tie_word_embeddings=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(self, preprocessor: "FeatureExtractionMixin", batch_size: int = 1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, num_channels: int = 3, image_width: int = 32, image_height: int = 32) -> Mapping[str, Any]:
        input_images = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_images, return_tensors=framework))
        return inputs
| 333
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
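# Hedged usage sketch (feature values assumed): aligning the template with a
# dataset's features copies that dataset's concrete ClassLabel into the
# template's label schema.
_features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
_aligned = TextClassification().align_with_features(_features)
assert _aligned.label_schema["labels"].names == ["neg", "pos"]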
| 333
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Any = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 236
|
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_UpperCAmelCase : int = logging.get_logger(__name__) # pylint: disable=invalid-name
_UpperCAmelCase : Dict = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")\n >>> pipe_prior.to(\"cuda\")\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")\n >>> pipe.to(\"cuda\")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save(\"cat.png\")\n ```\n"
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
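# Worked example for the helper above: a 768x768 request with movq scale factor 8
# maps onto 768 // 8**2 = 12 latent cells per side, returned as (12 * 8, 12 * 8).
assert downscale_height_and_width(768, 768, 8) == (96, 96)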
class KandinskyV22Pipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def SCREAMING_SNAKE_CASE ( self: int ):
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowerCAmelCase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(_UpperCAmelCase)  # _UpperCAmelCase holds the module-level example docstring above
    def __call__(self, image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler,
        )
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False,
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 236
| 1
|
"""simple docstring"""
import os
from collections.abc import Iterator
def UpperCAmelCase_ ( a_ = "." ):
'''simple docstring'''
for dir_path, dir_names, filenames in os.walk(a_ ):
lowerCamelCase : Any = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(a_ )[1] in (".py", ".ipynb"):
yield os.path.join(a_, a_ ).lstrip('./' )
def UpperCAmelCase_ ( a_ ):
'''simple docstring'''
return F"""{i * ' '}*""" if i else "\n##"
def UpperCAmelCase_ ( a_, a_ ):
'''simple docstring'''
lowerCamelCase : List[Any] = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(a_ ) or old_parts[i] != new_part) and new_part:
print(F"""{md_prefix(a_ )} {new_part.replace('_', ' ' ).title()}""" )
return new_path
def UpperCAmelCase_ ( a_ = "." ):
'''simple docstring'''
lowerCamelCase : str = ''
for filepath in sorted(good_file_paths(a_ ) ):
lowerCamelCase , lowerCamelCase : str = os.path.split(a_ )
if filepath != old_path:
lowerCamelCase : Tuple = print_path(a_, a_ )
lowerCamelCase : str = (filepath.count(os.sep ) + 1) if filepath else 0
lowerCamelCase : int = F"""{filepath}/{filename}""".replace(' ', '%20' )
lowerCamelCase : int = os.path.splitext(filename.replace('_', ' ' ).title() )[0]
print(F"""{md_prefix(a_ )} [{filename}]({url})""" )
if __name__ == "__main__":
print_directory_md('.')
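# A quick check of the prefix helper above: indent level 0 starts a new "##"
# markdown heading, deeper levels become two-space-indented bullets.
assert md_prefix(0) == "\n##"
assert md_prefix(2) == "    *"  # two levels of two-space indent, then "*"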
| 354
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.")

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
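# The `inputs_to_logits_ratio` property above is just the product of the
# convolutional strides: it tells you how many raw audio samples collapse
# into one frame of hidden states. A standalone check with the default
# strides from the config above:
import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
ratio = functools.reduce(operator.mul, conv_stride, 1)
assert ratio == 320  # 320 samples per frame, i.e. 20 ms of audio at 16 kHz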
| 205
| 0
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the height and width the processor should produce for these inputs."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
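# The helper above mirrors aspect-ratio-preserving "shortest edge" resizing:
# the shorter side is scaled to a target length and the longer side follows
# proportionally. The same arithmetic as a standalone function:
def shortest_edge_resize(h: int, w: int, shortest_edge: int = 18) -> tuple[int, int]:
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge

assert shortest_edge_resize(400, 200) == (36, 18)  # a 2:1 portrait image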
@require_torch
@require_vision
class lowercase_ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
A__ : str = YolosImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self ):
"""simple docstring"""
        self.image_processor_tester = YolosImageProcessingTester(self)
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """image_std""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """size""" ) )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 1_8, """longest_edge""": 1_3_3_3} )
self.assertEqual(image_processor.do_pad , __UpperCamelCase )
UpperCamelCase_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__UpperCamelCase )
self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2, """longest_edge""": 8_4} )
self.assertEqual(image_processor.do_pad , __UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
pass
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , Image.Image )
# Test not batched input
UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCamelCase_ , UpperCamelCase_ = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase_ , UpperCamelCase_ = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
UpperCamelCase_ = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , np.ndarray )
# Test not batched input
UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCamelCase_ , UpperCamelCase_ = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase_ = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
UpperCamelCase_ , UpperCamelCase_ = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test not batched input
UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCamelCase_ , UpperCamelCase_ = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase_ = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
UpperCamelCase_ , UpperCamelCase_ = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
UpperCamelCase_ = self.image_processing_class(do_resize=__UpperCamelCase , do_normalize=__UpperCamelCase , do_rescale=__UpperCamelCase )
# create random PyTorch tensors
UpperCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
UpperCamelCase_ = image_processing_a.pad(__UpperCamelCase , return_tensors="""pt""" )
UpperCamelCase_ = image_processing_a(__UpperCamelCase , return_tensors="""pt""" )
self.assertTrue(
torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1e-4 ) )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
UpperCamelCase_ = json.loads(f.read() )
UpperCamelCase_ = {"""image_id""": 3_9_7_6_9, """annotations""": target}
# encode them
UpperCamelCase_ = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" )
UpperCamelCase_ = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
UpperCamelCase_ = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase )
UpperCamelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1e-4 ) )
# verify area
UpperCamelCase_ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) )
# verify boxes
UpperCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase )
UpperCamelCase_ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1e-3 ) )
# verify image_id
UpperCamelCase_ = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) )
# verify is_crowd
UpperCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) )
# verify class_labels
UpperCamelCase_ = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) )
# verify orig_size
UpperCamelCase_ = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) )
# verify size
UpperCamelCase_ = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
UpperCamelCase_ = json.loads(f.read() )
UpperCamelCase_ = {"""file_name""": """000000039769.png""", """image_id""": 3_9_7_6_9, """segments_info""": target}
UpperCamelCase_ = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
UpperCamelCase_ = YolosImageProcessor(format="""coco_panoptic""" )
UpperCamelCase_ = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , masks_path=__UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
UpperCamelCase_ = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase )
UpperCamelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1e-4 ) )
# verify area
UpperCamelCase_ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) )
# verify boxes
UpperCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase )
UpperCamelCase_ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1e-3 ) )
# verify image_id
UpperCamelCase_ = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) )
# verify is_crowd
UpperCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) )
# verify class_labels
UpperCamelCase_ = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) )
# verify masks
UpperCamelCase_ = 8_2_2_8_7_3
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __UpperCamelCase )
# verify orig_size
UpperCamelCase_ = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) )
# verify size
UpperCamelCase_ = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) )
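# The expected "boxes" values in the tests above are normalized
# (center_x, center_y, width, height), the box format DETR-family processors
# emit. A minimal sketch of the conversion from absolute corner coordinates
# (the raw COCO layout); the sample box and image size are illustrative:
import torch

def corners_to_normalized_cxcywh(boxes: torch.Tensor, img_h: int, img_w: int) -> torch.Tensor:
    x0, y0, x1, y1 = boxes.unbind(-1)
    cxcywh = torch.stack([(x0 + x1) / 2, (y0 + y1) / 2, x1 - x0, y1 - y0], dim=-1)
    return cxcywh / torch.tensor([img_w, img_h, img_w, img_h], dtype=cxcywh.dtype)

box = torch.tensor([[10.0, 20.0, 110.0, 120.0]])
print(corners_to_normalized_cxcywh(box, img_h=480, img_w=640))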
| 122
|
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class lowercase_ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
A__ : Tuple = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
A__ : Any = False
A__ : List[Any] = False
A__ : Dict = False
def lowerCamelCase_ ( self ):
"""simple docstring"""
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
def lowerCamelCase_ ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self ):
"""simple docstring"""
return
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def lowerCamelCase_ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def lowerCamelCase_ ( self ):
"""simple docstring"""
pass
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(__UpperCamelCase )
UpperCamelCase_ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ = [*signature.parameters.keys()]
UpperCamelCase_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
def check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
UpperCamelCase_ = model_class(__UpperCamelCase )
UpperCamelCase_ = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
UpperCamelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase_ = self.model_tester.num_stages
self.assertEqual(len(__UpperCamelCase ) , expected_num_stages + 1 )
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase_ = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase_ = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
UpperCamelCase_ = model_class(__UpperCamelCase )
@jax.jit
def model_jitted(__UpperCamelCase , **__UpperCamelCase ):
return model(pixel_values=__UpperCamelCase , **__UpperCamelCase )
with self.subTest("""JIT Enabled""" ):
UpperCamelCase_ = model_jitted(**__UpperCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
UpperCamelCase_ = model_jitted(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase__ ( ) -> Tuple:
UpperCamelCase_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_flax
class lowercase_ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""facebook/regnet-y-040""" ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = FlaxRegNetForImageClassification.from_pretrained("""facebook/regnet-y-040""" )
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(images=__UpperCamelCase , return_tensors="""np""" )
UpperCamelCase_ = model(**__UpperCamelCase )
# verify the logits
UpperCamelCase_ = (1, 1_0_0_0)
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
UpperCamelCase_ = jnp.array([-0.4_180, -1.5_051, -3.4_836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
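# The jitted-vs-eager check in the tests above is a generic pattern for
# validating that a Flax module is trace-safe: run once under jax.jit, once
# with jit disabled, and compare outputs. A tiny self-contained version of
# the same idea (the toy affine function is illustrative):
import jax
import jax.numpy as jnp

@jax.jit
def affine(x, w, b):
    return x @ w + b

x, w, b = jnp.ones((2, 3)), jnp.ones((3, 4)), jnp.zeros(4)
with jax.disable_jit():
    eager = affine(x, w, b)
jitted = affine(x, w, b)
assert eager.shape == jitted.shape == (2, 4)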
| 122
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = '''dandelin/vilt-b32-finetuned-vqa'''
    description = (
        '''This is a tool that answers a question about an image. It takes an input named `image` which should be the '''
        '''image containing the information, as well as a `question` which should be the question in English. It '''
        '''returns a text that is the answer to the question.'''
    )
    name = '''image_qa'''
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering
    inputs = ['''image''', '''text''']
    outputs = ['''text''']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
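# A hedged usage sketch for the tool above. The explicit `setup()` call and
# the sample image path are assumptions for illustration, not taken from
# this file (the tool downloads the checkpoint on first use):
#
#   from PIL import Image
#
#   tool = ImageQuestionAnsweringTool()
#   tool.setup()  # instantiates processor and model from default_checkpoint
#   answer = tool(Image.open("photo.jpg"), "How many cats are there?")
#   print(answer)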
| 358
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np

# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250


def main() -> None:
    """
    Get images list and annotations list from input dir.
    Update new images and annotations.
    Save images and annotations in output dir.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths, annos, idxs, OUTPUT_SIZE, SCALE_RANGE, filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """
    Pair each label file with its image path; boxes are converted from
    YOLO (cx, cy, w, h) format to corner (xmin, ymin, xmax, ymax) format.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    """Combine 4 images into one mosaic image and remap their boxes."""
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]


def random_chars(number_char: int) -> str:
    """Generate a random lowercase/digit string of the given length."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
| 347
| 0
|
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger()
@dataclass
class Tracker:
    """Records the leaf modules a model executes, via forward hooks."""

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs, outputs):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    """Copies weights from `src` to `dest` by aligning their traced operations."""

    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}.")

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="""Add model""", use_temp_dir=True, )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="""Add image processor""", use_temp_dir=True, )

        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = """imagenet-1k-id2label.json"""
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = """huggingface/label-files"""
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset""" ), """r""" ) )
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
"""resnet18""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="""basic""" ),
"""resnet26""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet34""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="""basic""" ),
"""resnet50""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet101""": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet152""": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
}
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
args = parser.parse_args()
pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 100
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    """simple docstring"""
    default_checkpoint = '''dandelin/vilt-b32-finetuned-vqa'''
    description = (
        '''This is a tool that answers a question about an image. It takes an input named `image` which should be the '''
        '''image containing the information, as well as a `question` which should be the question in English. It '''
        '''returns a text that is the answer to the question.'''
    )
    name = '''image_qa'''
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering
    inputs = ['''image''', '''text''']
    outputs = ['''text''']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["""vision"""])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="""pt""")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
| 100
| 1
|
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k: str) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(F"could not find new key {new_k} in state dict. (converted from {k})" )

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, F"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["""shared.weight"""][cfg.pad_token_id] = torch.zeros_like(mapping["""shared.weight"""][cfg.pad_token_id + 1] )
    mapping["""encoder.embed_tokens.weight"""] = mapping["""shared.weight"""]
    mapping["""decoder.embed_tokens.weight"""] = mapping["""shared.weight"""]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("""bias""" ) and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["""encoder.embed_positions.weight""", """decoder.embed_positions.weight"""]
    ]
    assert unexpected_missing == [], F"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], F"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path: str = "./ckpt/aeslc/model.ckpt-32000") -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["""Adafactor""", """global_step"""]
    for name, shape in tqdm(init_vars, desc="""converting tf checkpoint to dict""" ):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[F"summarization_{dataset}"]["""max_position_embeddings"""]
    tok = PegasusTokenizer.from_pretrained("""sshleifer/pegasus""", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[F"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["""task_specific_params"""] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("""model.decoder.embed_positions.weight""" )
    sd.pop("""model.encoder.embed_positions.weight""" )
    torch.save(sd, Path(save_dir) / """pytorch_model.bin""" )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
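# PATTERNS is applied as ordered substring rewrites, so one TF variable name
# can pass through several rules in sequence. A quick trace with a made-up
# (hypothetical) checkpoint key:
#
#   "decoder/LayerNorm.gamma"
#   -> "decoder.LayerNorm.gamma"      ("/" -> ".")
#   -> "decoder_layer_norm.weight"    (".LayerNorm.gamma" -> "_layer_norm.weight")
#   -> "decoder.layer_norm.weight"    ("decoder_layer_norm." -> "decoder.layer_norm.")
assert rename_state_dict_key("decoder/LayerNorm.gamma") == "decoder.layer_norm.weight"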
| 159
|
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            """The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use GLPNImageProcessor instead.""", FutureWarning, )
        super().__init__(*args, **kwargs)
| 159
| 1
|
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return the word's letters, sorted."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every known word with the same signature."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("""words.txt""").read_text(encoding="""utf-8""")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}

    with open("""anagrams.txt""", """w""") as file:
        file.write("""all_anagrams = \n """)
        file.write(pprint.pformat(all_anagrams))
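# Two words are anagrams exactly when their sorted-letter signatures match,
# so grouping the whole word list by signature finds every anagram set in a
# single pass:
assert signature("listen") == signature("silent") == "eilnst"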
| 285
|
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( lowercase_ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = None
__SCREAMING_SNAKE_CASE : Any = BloomTokenizerFast
__SCREAMING_SNAKE_CASE : int = BloomTokenizerFast
__SCREAMING_SNAKE_CASE : Optional[Any] = True
__SCREAMING_SNAKE_CASE : str = False
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''tokenizer_file'''
__SCREAMING_SNAKE_CASE : Optional[int] = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''}
    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' )
        tokenizer.save_pretrained(self.tmpdirname )

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map )
        return BloomTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )

    def test_encodings_from_sample_data(self):
        tokenizer = self.get_rust_tokenizer()
        INPUT_SENTENCES = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
        TARGET_TOKENS = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]]
        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES )['input_ids']
        self.assertListEqual(TARGET_TOKENS , computed_tokens )
        decoded_tokens = tokenizer.batch_decode(computed_tokens )
        self.assertListEqual(decoded_tokens , INPUT_SENTENCES )
def a ( self , snake_case=6 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case_ = self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
snake_case_ = 'This is a simple input'
snake_case_ = ['This is a simple input 1', 'This is a simple input 2']
snake_case_ = ('This is a simple input', 'This is a pair')
snake_case_ = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
try:
tokenizer_r.encode(snake_case , max_length=snake_case )
tokenizer_r.encode_plus(snake_case , max_length=snake_case )
tokenizer_r.batch_encode_plus(snake_case , max_length=snake_case )
tokenizer_r.encode(snake_case , max_length=snake_case )
tokenizer_r.batch_encode_plus(snake_case , max_length=snake_case )
except ValueError:
self.fail('Bloom Tokenizer should be able to deal with padding' )
snake_case_ = None # Hotfixing padding = None
self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length' )
# Simple input
self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length' )
# Simple input
self.assertRaises(
snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , )
# Pair input
self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length' )
# Pair input
self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length' )
# Pair input
self.assertRaises(
snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , )
def a ( self ):
snake_case_ = self.get_rust_tokenizer()
snake_case_ = load_dataset('xnli' , 'all_languages' , split='test' , streaming=snake_case )
snake_case_ = next(iter(snake_case ) )['premise'] # pick up one data
snake_case_ = list(sample_data.values() )
snake_case_ = list(map(tokenizer.encode , snake_case ) )
snake_case_ = [tokenizer.decode(snake_case , clean_up_tokenization_spaces=snake_case ) for x in output_tokens]
self.assertListEqual(snake_case , snake_case )
def a ( self ):
# The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have
# any sequence length constraints. This test of the parent class will fail since it relies on the
# maximum sequence length of the positoonal embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 285
| 1
|
'''simple docstring'''
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class UpperCAmelCase__ :
def __init__( self ) -> Union[str, Any]:
__UpperCamelCase = False
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
if not self.initialized:
__UpperCamelCase = RagRetriever(
lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , index=lowercase , init_retrieval=lowercase , )
__UpperCamelCase = True
def __lowerCamelCase ( self ) -> List[Any]:
self.retriever.index.init_index()
def __lowerCamelCase ( self , lowercase , lowercase ) -> Optional[Any]:
__UpperCamelCase , __UpperCamelCase = self.retriever._main_retrieve(lowercase , lowercase )
return doc_ids, retrieved_doc_embeds
class UpperCAmelCase__ ( UpperCAmelCase_):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase=None ) -> Optional[Any]:
if index is not None and index.is_initialized() and len(lowercase ) > 0:
raise ValueError(
"""When using Ray for distributed fine-tuning, """
"""you'll need to provide the paths instead, """
"""as the dataset and the index are loaded """
"""separately. More info in examples/rag/use_own_knowledge_dataset.py """ )
super().__init__(
lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , index=lowercase , init_retrieval=lowercase , )
__UpperCamelCase = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(lowercase , lowercase , lowercase , lowercase )
for worker in self.retrieval_workers
] )
def __lowerCamelCase ( self ) -> Optional[int]:
logger.info("""initializing retrieval""" )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def __lowerCamelCase ( self , lowercase , lowercase ) -> List[str]:
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
__UpperCamelCase = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
__UpperCamelCase , __UpperCamelCase = ray.get(random_worker.retrieve.remote(lowercase , lowercase ) )
else:
__UpperCamelCase , __UpperCamelCase = self._main_retrieve(lowercase , lowercase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(lowercase )
@classmethod
def __lowerCamelCase ( cls , lowercase , lowercase=None , **lowercase ) -> Tuple:
return super(lowercase , cls ).get_tokenizers(lowercase , lowercase , **lowercase )
@classmethod
def __lowerCamelCase ( cls , lowercase , lowercase , lowercase=None , **lowercase ) -> Dict:
__UpperCamelCase = kwargs.pop("""config""" , lowercase ) or RagConfig.from_pretrained(lowercase , **lowercase )
__UpperCamelCase = RagTokenizer.from_pretrained(lowercase , config=lowercase )
__UpperCamelCase = rag_tokenizer.question_encoder
__UpperCamelCase = rag_tokenizer.generator
if indexed_dataset is not None:
__UpperCamelCase = """custom"""
__UpperCamelCase = CustomHFIndex(config.retrieval_vector_size , lowercase )
else:
__UpperCamelCase = cls._build_index(lowercase )
return cls(
lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , retrieval_workers=lowercase , index=lowercase , )
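# The retriever above fans retrieval out to Ray actors and picks one at
# random per query, so index lookups never block the training workers. A
# minimal, self-contained sketch of that actor pattern (the toy `Lookup`
# actor and its table are illustrative, not part of the RAG code):
import random
import ray

@ray.remote
class Lookup:
    def __init__(self, table):
        self.table = table

    def retrieve(self, key):
        return self.table.get(key, None)

ray.init(ignore_reinit_error=True)
workers = [Lookup.remote({"a": 1, "b": 2}) for _ in range(3)]
worker = workers[random.randint(0, len(workers) - 1)]  # random actor per query
print(ray.get(worker.retrieve.remote("a")))  # 1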
| 243
|
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = """{val_avg_rouge2:.4f}-{step_count}"""
    elif metric == "bleu":
        exp = """{val_avg_bleu:.4f}-{step_count}"""
    elif metric == "em":
        exp = """{val_avg_em:.4f}-{step_count}"""
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            """ function.""" )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="""max""", save_top_k=3, every_n_epochs=1, )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}", mode="""min""" if """loss""" in metric else """max""", patience=patience, verbose=True, )
class UpperCAmelCase__ ( pl.Callback):
def __lowerCamelCase ( self , lowercase , lowercase ) -> Dict:
__UpperCamelCase = {f"lr_group_{i}": param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(lowercase )
@rank_zero_only
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase=True ) -> None:
logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****" )
__UpperCamelCase = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} )
# Log results
__UpperCamelCase = Path(pl_module.hparams.output_dir )
if type_path == "test":
__UpperCamelCase = od / """test_results.txt"""
__UpperCamelCase = od / """test_generations.txt"""
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
__UpperCamelCase = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
__UpperCamelCase = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
results_file.parent.mkdir(exist_ok=lowercase )
generations_file.parent.mkdir(exist_ok=lowercase )
with open(lowercase , """a+""" ) as writer:
for key in sorted(lowercase ):
if key in ["log", "progress_bar", "preds"]:
continue
__UpperCamelCase = metrics[key]
if isinstance(lowercase , torch.Tensor ):
__UpperCamelCase = val.item()
__UpperCamelCase = f"{key}: {val:.6f}\n"
writer.write(lowercase )
if not save_generations:
return
if "preds" in metrics:
__UpperCamelCase = """\n""".join(metrics["""preds"""] )
generations_file.open("""w+""" ).write(lowercase )
@rank_zero_only
def __lowerCamelCase ( self , lowercase , lowercase ) -> str:
try:
__UpperCamelCase = pl_module.model.model.num_parameters()
except AttributeError:
__UpperCamelCase = pl_module.model.num_parameters()
__UpperCamelCase = count_trainable_parameters(lowercase )
# mp stands for million parameters
trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1E6, """grad_mp""": n_trainable_pars / 1E6} )
@rank_zero_only
def __lowerCamelCase ( self , lowercase , lowercase ) -> Optional[Any]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(lowercase , lowercase , """test""" )
@rank_zero_only
def __lowerCamelCase ( self , lowercase , lowercase ) -> int:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 243
| 1
|
import numpy as np
class _SCREAMING_SNAKE_CASE :
def __init__( self : int ):
UpperCamelCase :int = (0, 0)
UpperCamelCase :List[str] = None
UpperCamelCase :Union[str, Any] = 0
UpperCamelCase :Optional[Any] = 0
UpperCamelCase :Optional[Any] = 0
def __eq__( self : Union[str, Any] , __lowerCamelCase : int ):
return self.position == cell.position
def _A ( self : Optional[Any] ):
print(self.position )
class _SCREAMING_SNAKE_CASE :
def __init__( self : List[Any] , __lowerCamelCase : Any=(5, 5) ):
UpperCamelCase :Tuple = np.zeros(__lowerCamelCase )
UpperCamelCase :str = world_size[0]
UpperCamelCase :List[str] = world_size[1]
def _A ( self : List[Any] ):
print(self.w )
def _A ( self : Optional[Any] , __lowerCamelCase : str ):
UpperCamelCase :str = [
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
UpperCamelCase :Optional[int] = cell.position[0]
UpperCamelCase :Tuple = cell.position[1]
UpperCamelCase :Optional[Any] = []
        for n in neighbour_cord:
UpperCamelCase :int = current_x + n[0]
UpperCamelCase :Optional[Any] = current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
UpperCamelCase :List[str] = Cell()
UpperCamelCase :str = (x, y)
UpperCamelCase :int = cell
neighbours.append(__lowerCamelCase )
return neighbours
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Any , __magic_name__ : str , __magic_name__ : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase :List[str] = []
UpperCamelCase :List[str] = []
_open.append(__magic_name__ )
while _open:
UpperCamelCase :Optional[Any] = np.argmin([n.f for n in _open] )
UpperCamelCase :Optional[Any] = _open[min_f]
_closed.append(_open.pop(__magic_name__ ) )
if current == goal:
break
        for n in world.get_neighbours(__magic_name__ ):
for c in _closed:
if c == n:
continue
UpperCamelCase :List[Any] = current.g + 1
UpperCamelCase , UpperCamelCase :Dict = n.position
UpperCamelCase , UpperCamelCase :List[str] = goal.position
UpperCamelCase :Any = (ya - ya) ** 2 + (xa - xa) ** 2
UpperCamelCase :Optional[int] = n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(__magic_name__ )
UpperCamelCase :int = []
while current.parent is not None:
path.append(current.position )
UpperCamelCase :Optional[int] = current.parent
path.append(current.position )
return path[::-1]
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = Gridworld()
# Start position and goal
UpperCAmelCase_ : List[str] = Cell()
UpperCAmelCase_ : str = (0, 0)
UpperCAmelCase_ : Tuple = Cell()
UpperCAmelCase_ : int = (4, 4)
print(F'''path from {start.position} to {goal.position}''')
UpperCAmelCase_ : Tuple = astar(world, start, goal)
# Just for visual reasons.
for i in s:
UpperCAmelCase_ : Union[str, Any] = 1
print(world.w)
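# A compact, self-contained A* sketch over the same kind of grid as above, using a heap with
# a tie-breaking counter instead of np.argmin over the open list. The names, the 0/1 grid
# convention, and the helper are assumptions of this sketch; the squared-Euclidean heuristic
# is kept from the file above even though it is not admissible, so returned paths are valid
# but not guaranteed shortest.
import heapq
from itertools import count

def astar_grid(grid, start, goal):
    rows, cols = len(grid), len(grid[0])

    def h(p):  # squared Euclidean distance, matching the heuristic used above
        return (p[0] - goal[0]) ** 2 + (p[1] - goal[1]) ** 2

    tie = count()  # tie-breaker so the heap never compares positions or parents
    open_heap = [(h(start), next(tie), 0, start, None)]
    came_from = {}
    while open_heap:
        _, _, g, pos, parent = heapq.heappop(open_heap)
        if pos in came_from:  # already expanded via a cheaper entry
            continue
        came_from[pos] = parent
        if pos == goal:  # rebuild the path by walking parent links backwards
            path = []
            while pos is not None:
                path.append(pos)
                pos = came_from[pos]
            return path[::-1]
        for dx in (-1, 0, 1):
            for dy in (-1, 0, 1):
                x, y = pos[0] + dx, pos[1] + dy
                if (dx, dy) == (0, 0) or not (0 <= x < rows and 0 <= y < cols):
                    continue
                if grid[x][y] == 1:  # blocked cell
                    continue
                heapq.heappush(open_heap, (g + 1 + h((x, y)), next(tie), g + 1, (x, y), pos))
    return []

print(astar_grid([[0] * 5 for _ in range(5)], (0, 0), (4, 4)))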
| 38
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
a_ = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['ConvNextFeatureExtractor']
a_ = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
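# The _LazyModule indirection above defers the heavy torch/TF imports until an attribute is
# first accessed. Below is a minimal sketch of the same idea using a module-level
# __getattr__ (PEP 562); it is an illustration, not the library's implementation, and it
# assumes it lives in a package's __init__.py so the relative import can resolve.
import importlib

_import_structure_sketch = {
    "configuration_convnext": ["ConvNextConfig"],
    "modeling_convnext": ["ConvNextModel"],
}
_attr_to_module = {attr: mod for mod, attrs in _import_structure_sketch.items() for attr in attrs}

def __getattr__(name):
    # resolve the attribute on first access, then cache it in the module namespace
    if name not in _attr_to_module:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    module = importlib.import_module("." + _attr_to_module[name], __name__)
    value = getattr(module, name)
    globals()[name] = value
    return value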
| 249
| 0
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( __A ) -> int:
if a < 0:
raise ValueError('Input value must be a positive integer' )
elif isinstance(__A , __A ):
raise TypeError('Input value must be a \'int\' type' )
return bin(__A ).count('1' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 160
|
'''simple docstring'''
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
lowercase : int = NewType("DataClass", Any)
lowercase : Dict = NewType("DataClassType", Any)
def SCREAMING_SNAKE_CASE__ ( __A ) -> Optional[Any]:
if isinstance(__A , __A ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
F'Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).' )
def SCREAMING_SNAKE_CASE__ ( __A ) -> Callable[[str], Any]:
_snake_case = {str(__A ): choice for choice in choices}
return lambda __A : str_to_choice.get(__A , __A )
def SCREAMING_SNAKE_CASE__ ( *, __A = None , __A = None , __A = dataclasses.MISSING , __A = dataclasses.MISSING , __A = None , **__A , ) -> dataclasses.Field:
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
_snake_case = {}
if aliases is not None:
_snake_case = aliases
if help is not None:
_snake_case = help
return dataclasses.field(metadata=__A , default=__A , default_factory=__A , **__A )
class __UpperCAmelCase ( _lowerCamelCase ):
__lowercase = 42
def __init__( self , lowerCAmelCase_ , **lowerCAmelCase_ ):
"""simple docstring"""
if "formatter_class" not in kwargs:
_snake_case = ArgumentDefaultsHelpFormatter
super().__init__(**lowerCAmelCase_ )
if dataclasses.is_dataclass(lowerCAmelCase_ ):
_snake_case = [dataclass_types]
_snake_case = list(lowerCAmelCase_ )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(lowerCAmelCase_ )
@staticmethod
def lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = F'--{field.name}'
_snake_case = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , lowerCAmelCase_ ):
raise RuntimeError(
                'Unresolved type detected, which should have been resolved with the help of the '
                '`typing.get_type_hints` method by default' )
_snake_case = kwargs.pop('aliases' , [] )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = [aliases]
_snake_case = getattr(field.type , '__origin__' , field.type )
if origin_type is Union or (hasattr(lowerCAmelCase_ , 'UnionType' ) and isinstance(lowerCAmelCase_ , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(lowerCAmelCase_ ) not in field.type.__args__
):
raise ValueError(
'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
' the argument parser only supports one type per argument.'
F' Problem encountered in field \'{field.name}\'.' )
if type(lowerCAmelCase_ ) not in field.type.__args__:
# filter `str` in Union
_snake_case = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
_snake_case = getattr(field.type , '__origin__' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
_snake_case = (
field.type.__args__[0] if isinstance(lowerCAmelCase_ , field.type.__args__[1] ) else field.type.__args__[1]
)
_snake_case = getattr(field.type , '__origin__' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
_snake_case = {}
if origin_type is Literal or (isinstance(field.type , lowerCAmelCase_ ) and issubclass(field.type , lowerCAmelCase_ )):
if origin_type is Literal:
_snake_case = field.type.__args__
else:
_snake_case = [x.value for x in field.type]
_snake_case = make_choice_type_function(kwargs['choices'] )
if field.default is not dataclasses.MISSING:
_snake_case = field.default
else:
_snake_case = True
elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
_snake_case = copy(lowerCAmelCase_ )
# Hack because type=bool in argparse does not behave as we want.
_snake_case = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if a bool field has no explicit default.
_snake_case = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
_snake_case = default
# This tells argparse we accept 0 or 1 value after --field_name
_snake_case = '?'
# This is the value that will get picked if we do --field_name (without value)
_snake_case = True
elif isclass(lowerCAmelCase_ ) and issubclass(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = field.type.__args__[0]
_snake_case = '+'
if field.default_factory is not dataclasses.MISSING:
_snake_case = field.default_factory()
elif field.default is dataclasses.MISSING:
_snake_case = True
else:
_snake_case = field.type
if field.default is not dataclasses.MISSING:
_snake_case = field.default
elif field.default_factory is not dataclasses.MISSING:
_snake_case = field.default_factory()
else:
_snake_case = True
parser.add_argument(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
_snake_case = False
parser.add_argument(F'--no_{field.name}' , action='store_false' , dest=field.name , **lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
if hasattr(lowerCAmelCase_ , '_argument_group_name' ):
_snake_case = self.add_argument_group(dtype._argument_group_name )
else:
_snake_case = self
try:
_snake_case = get_type_hints(lowerCAmelCase_ )
except NameError:
raise RuntimeError(
F'Type resolution failed for {dtype}. Try declaring the class in global scope or '
                'removing the line `from __future__ import annotations`, which opts in to Postponed '
'Evaluation of Annotations (PEP 563)' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(lowerCAmelCase_ ):
_snake_case = '.'.join(map(lowerCAmelCase_ , sys.version_info[:3] ) )
raise RuntimeError(
F'Type resolution failed for {dtype} on Python {python_version}. Try removing '
                    'the line `from __future__ import annotations`, which opts in to union types as '
'`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
                    'support Python versions lower than 3.10, you need to use '
'`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
'`X | None`.' ) from ex
raise
for field in dataclasses.fields(lowerCAmelCase_ ):
if not field.init:
continue
_snake_case = type_hints[field.name]
self._parse_dataclass_field(lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=None , lowerCAmelCase_=None , ):
"""simple docstring"""
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
_snake_case = []
if args_filename:
args_files.append(Path(lowerCAmelCase_ ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
_snake_case = ArgumentParser()
args_file_parser.add_argument(lowerCAmelCase_ , type=lowerCAmelCase_ , action='append' )
# Use only remaining args for further parsing (remove the args_file_flag)
_snake_case , _snake_case = args_file_parser.parse_known_args(args=lowerCAmelCase_ )
_snake_case = vars(lowerCAmelCase_ ).get(args_file_flag.lstrip('-' ) , lowerCAmelCase_ )
if cmd_args_file_paths:
args_files.extend([Path(lowerCAmelCase_ ) for p in cmd_args_file_paths] )
_snake_case = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
_snake_case = file_args + args if args is not None else file_args + sys.argv[1:]
_snake_case , _snake_case = self.parse_known_args(args=lowerCAmelCase_ )
_snake_case = []
for dtype in self.dataclass_types:
_snake_case = {f.name for f in dataclasses.fields(lowerCAmelCase_ ) if f.init}
_snake_case = {k: v for k, v in vars(lowerCAmelCase_ ).items() if k in keys}
for k in keys:
delattr(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = dtype(**lowerCAmelCase_ )
outputs.append(lowerCAmelCase_ )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(lowerCAmelCase_ )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F'Some specified arguments are not used by the HfArgumentParser: {remaining_args}' )
return (*outputs,)
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False ):
"""simple docstring"""
_snake_case = set(args.keys() )
_snake_case = []
for dtype in self.dataclass_types:
_snake_case = {f.name for f in dataclasses.fields(lowerCAmelCase_ ) if f.init}
_snake_case = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
_snake_case = dtype(**lowerCAmelCase_ )
outputs.append(lowerCAmelCase_ )
if not allow_extra_keys and unused_keys:
raise ValueError(F'Some keys are not used by the HfArgumentParser: {sorted(lowerCAmelCase_ )}' )
return tuple(lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False ):
"""simple docstring"""
with open(Path(lowerCAmelCase_ ) , encoding='utf-8' ) as open_json_file:
_snake_case = json.loads(open_json_file.read() )
_snake_case = self.parse_dict(lowerCAmelCase_ , allow_extra_keys=lowerCAmelCase_ )
return tuple(lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False ):
"""simple docstring"""
_snake_case = self.parse_dict(yaml.safe_load(Path(lowerCAmelCase_ ).read_text() ) , allow_extra_keys=lowerCAmelCase_ )
return tuple(lowerCAmelCase_ )
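# A minimal sketch of how a dataclass-driven parser like the one above is typically wired up,
# including the `--x` / `--no_x` ordering trick from the comments. The dataclass, its fields,
# and the defaults are illustrative assumptions, not taken from the file.
import dataclasses
from argparse import ArgumentParser

@dataclasses.dataclass
class TrainArgsSketch:
    learning_rate: float = 3e-4
    do_eval: bool = False

sketch_parser = ArgumentParser()
for f in dataclasses.fields(TrainArgsSketch):
    if f.type is bool:
        # the on-switch is added first, the `no_*` off-switch second: order matters because
        # argparse applies the default of whichever action registered the dest first
        sketch_parser.add_argument(f"--{f.name}", action="store_true", default=f.default)
        sketch_parser.add_argument(f"--no_{f.name}", action="store_false", dest=f.name)
    else:
        sketch_parser.add_argument(f"--{f.name}", type=f.type, default=f.default)

sketch_args = TrainArgsSketch(**vars(sketch_parser.parse_args(["--learning_rate", "1e-3", "--do_eval"])))
print(sketch_args)  # TrainArgsSketch(learning_rate=0.001, do_eval=True)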
| 160
| 1
|
"""simple docstring"""
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class lowerCAmelCase__ ( __snake_case ):
'''simple docstring'''
def __init__( self : Dict , lowercase_ : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = data
def __iter__( self : Dict):
'''simple docstring'''
for element in self.data:
yield element
def _A (__a=True ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = Accelerator(even_batches=__SCREAMING_SNAKE_CASE )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def _A (__a , __a , __a , __a = False ) -> Dict:
"""simple docstring"""
if iterable:
SCREAMING_SNAKE_CASE_ : Any = DummyIterableDataset(torch.as_tensor(range(__SCREAMING_SNAKE_CASE ) ) )
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TensorDataset(torch.as_tensor(range(__SCREAMING_SNAKE_CASE ) ) )
SCREAMING_SNAKE_CASE_ : Any = DataLoader(__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = accelerator.prepare(__SCREAMING_SNAKE_CASE )
return dl
def _A (__a , __a , __a , __a , __a , ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = create_dataloader(accelerator=__SCREAMING_SNAKE_CASE , dataset_size=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : str = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def _A () -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
__SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
__SCREAMING_SNAKE_CASE , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def _A () -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = create_accelerator(even_batches=__SCREAMING_SNAKE_CASE )
verify_dataloader_batch_sizes(
__SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
__SCREAMING_SNAKE_CASE , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def _A () -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = create_accelerator(even_batches=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Any = torch.nn.Linear(1 , 1 )
SCREAMING_SNAKE_CASE_ : Dict = accelerator.prepare(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = create_dataloader(__SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(__SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ddp_model(batch[0].float() )
SCREAMING_SNAKE_CASE_ : int = output.sum()
loss.backward()
batch_idxs.append(__SCREAMING_SNAKE_CASE )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def _A (__a ) -> List[str]:
"""simple docstring"""
with warnings.catch_warnings(record=__SCREAMING_SNAKE_CASE ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , __SCREAMING_SNAKE_CASE )
assert "only supported for multi-GPU" in str(w[-1].message )
def _A () -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Optional[int] = False
SCREAMING_SNAKE_CASE_ : List[str] = create_accelerator(even_batches=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.nn.Linear(1 , 1 )
SCREAMING_SNAKE_CASE_ : List[str] = accelerator.prepare(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Tuple = create_dataloader(__SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 )
SCREAMING_SNAKE_CASE_ : int = create_dataloader(__SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = train_dl.batch_sampler.even_batches
SCREAMING_SNAKE_CASE_ : str = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def _A () -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : Tuple = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = create_accelerator(even_batches=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.nn.Linear(1 , 1 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.prepare(__SCREAMING_SNAKE_CASE )
create_dataloader(__SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 , iterable=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : str = create_dataloader(__SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings('''ignore''' )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : Dict = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def _A () -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = create_accelerator()
SCREAMING_SNAKE_CASE_ : str = torch.nn.Linear(1 , 1 )
SCREAMING_SNAKE_CASE_ : Tuple = accelerator.prepare(__SCREAMING_SNAKE_CASE )
create_dataloader(__SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 , iterable=__SCREAMING_SNAKE_CASE )
with warnings.catch_warnings(record=__SCREAMING_SNAKE_CASE ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__SCREAMING_SNAKE_CASE ):
pass
assert issubclass(w[-1].category , __SCREAMING_SNAKE_CASE )
assert "only supported for map-style datasets" in str(w[-1].message )
def _A () -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = create_accelerator()
accelerator.print('''Test that even_batches variable ensures uniform batches across processes''' )
test_default_ensures_even_batch_sizes()
accelerator.print('''Run tests with even_batches disabled''' )
test_can_disable_even_batches()
accelerator.print('''Test joining uneven inputs''' )
test_can_join_uneven_inputs()
accelerator.print('''Test overriding even_batches when joining uneven inputs''' )
test_join_can_override_even_batches()
accelerator.print('''Test overriding even_batches for mixed dataloader types''' )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print('''Test overriding even_batches raises a warning for iterable dataloaders''' )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print('''Test join with non DDP distributed raises warning''' )
SCREAMING_SNAKE_CASE_ : Tuple = accelerator.state.distributed_type
SCREAMING_SNAKE_CASE_ : Dict = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[Any] = original_state
if __name__ == "__main__":
main()
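# The assertions above encode how `even_batches` pads the shorter shard. The pure-Python
# sketch below reproduces only that batch-size bookkeeping; the sharding rule here is a
# simplification (round-robin over samples) and is not how accelerate actually shards.
def per_process_batch_sizes(dataset_size, batch_size, num_procs, even_batches):
    idx = list(range(dataset_size))
    if even_batches and dataset_size % num_procs:
        idx += idx[: num_procs - dataset_size % num_procs]  # pad by cycling from the start
    shards = [idx[r::num_procs] for r in range(num_procs)]
    return [
        [len(shard[i : i + batch_size]) for i in range(0, len(shard), batch_size)]
        for shard in shards
    ]

print(per_process_batch_sizes(7, 2, 2, even_batches=True))   # [[2, 2], [2, 2]]
print(per_process_batch_sizes(7, 2, 2, even_batches=False))  # [[2, 2], [2, 1]]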
| 91
|
"""simple docstring"""
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
# Construct model
if gpta_config_file == "":
__lowerCAmelCase: Optional[int] = GPTaConfig()
else:
__lowerCAmelCase: List[str] = GPTaConfig.from_json_file(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[int] = GPTaModel(__SCREAMING_SNAKE_CASE )
    # Load weights from the TensorFlow checkpoint (read in as numpy arrays)
load_tf_weights_in_gpta(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Save pytorch-model
__lowerCAmelCase: str = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
__lowerCAmelCase: List[Any] = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(F"Save PyTorch model to {pytorch_weights_dump_path}" )
torch.save(model.state_dict() , __SCREAMING_SNAKE_CASE )
print(F"Save configuration file to {pytorch_config_dump_path}" )
with open(__SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--gpt2_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
__A = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 217
| 0
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def lowercase__ ( _UpperCAmelCase=None ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[Any] = argparse.ArgumentParser(add_help=_UpperCAmelCase , allow_abbrev=_UpperCAmelCase )
# The main config parser
lowercase : Optional[Any] = config_command_parser(_UpperCAmelCase )
# The subparser to add commands to
lowercase : int = config_parser.add_subparsers(title='subcommands' , dest='subcommand' )
# Then add other parsers with the parent parser
default_command_parser(_UpperCAmelCase , parents=[parent_parser] )
update_command_parser(_UpperCAmelCase , parents=[parent_parser] )
return config_parser
def lowercase__ ( ) -> List[str]:
'''simple docstring'''
lowercase : int = get_config_parser()
lowercase : int = config_parser.parse_args()
if not hasattr(_UpperCAmelCase , 'func' ):
config_parser.print_help()
exit(1 )
# Run
args.func(_UpperCAmelCase )
if __name__ == "__main__":
main()
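# A minimal sketch of the parser/subparser layout built above, with one dummy subcommand
# standing in for default/update; the command name and the lambda are illustrative only.
import argparse

sketch_parser = argparse.ArgumentParser(prog="accelerate config", allow_abbrev=False)
sketch_sub = sketch_parser.add_subparsers(title="subcommands", dest="subcommand")
sketch_update = sketch_sub.add_parser("update")
sketch_update.set_defaults(func=lambda a: print("running", a.subcommand))

sketch_args = sketch_parser.parse_args(["update"])
if not hasattr(sketch_args, "func"):
    sketch_parser.print_help()
else:
    sketch_args.func(sketch_args)  # prints: running update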
| 53
|
"""simple docstring"""
import unittest
from transformers import DonutProcessor
_UpperCamelCase: Any = 'naver-clova-ix/donut-base'
class a__ ( unittest.TestCase ):
def lowercase ( self : Optional[Any] ) -> Tuple:
lowercase : Any = DonutProcessor.from_pretrained(lowerCAmelCase )
def lowercase ( self : Dict ) -> Union[str, Any]:
lowercase : Tuple = {
'name': 'John Doe',
'age': '99',
'city': 'Atlanta',
'state': 'GA',
'zip': '30301',
'phone': '123-4567',
'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
}
lowercase : Tuple = (
'<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
'<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
'<s_nicknames><s_nickname>Johnny</s_nickname>'
'<sep/><s_nickname>JD</s_nickname></s_nicknames>'
)
lowercase : Any = self.processor.tokenajson(lowerCAmelCase )
self.assertDictEqual(lowerCAmelCase, lowerCAmelCase )
| 53
| 1
|
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def UpperCamelCase ( _lowerCamelCase : str , _lowerCamelCase : str , **_lowerCamelCase : Optional[Any] ):
A__ = AutoConfig.from_pretrained(_lowercase , **_lowercase )
A__ = AutoModelForSeqaSeqLM.from_config(_lowercase )
model.save_pretrained(_lowercase )
AutoTokenizer.from_pretrained(_lowercase ).save_pretrained(_lowercase )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 237
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_lowercase : int ={}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[str] =["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
_lowercase : Optional[int] =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 170
| 0
|
'''simple docstring'''
UpperCAmelCase = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
| 187
|
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins
UpperCAmelCase = ['tests.fixtures.files', 'tests.fixtures.hub', 'tests.fixtures.fsspec']
def _snake_case ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int ) -> Optional[int]:
"""simple docstring"""
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ["""integration""", """unit"""] ):
continue
item.add_marker(pytest.mark.unit )
def _snake_case ( _SCREAMING_SNAKE_CASE : List[str] ) -> Any:
"""simple docstring"""
config.addinivalue_line("""markers""" , """torchaudio_latest: mark test to run with torchaudio>=0.12""" )
@pytest.fixture(autouse=_SCREAMING_SNAKE_CASE )
def _snake_case ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Dict ) -> str:
"""simple docstring"""
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why does a cache dir per test function not work?
lowerCAmelCase = tmp_path_factory.getbasetemp() / """cache"""
lowerCAmelCase = test_hf_cache_home / """datasets"""
lowerCAmelCase = test_hf_cache_home / """metrics"""
lowerCAmelCase = test_hf_cache_home / """modules"""
monkeypatch.setattr("""datasets.config.HF_DATASETS_CACHE""" , str(_SCREAMING_SNAKE_CASE ) )
monkeypatch.setattr("""datasets.config.HF_METRICS_CACHE""" , str(_SCREAMING_SNAKE_CASE ) )
monkeypatch.setattr("""datasets.config.HF_MODULES_CACHE""" , str(_SCREAMING_SNAKE_CASE ) )
lowerCAmelCase = test_hf_datasets_cache / """downloads"""
monkeypatch.setattr("""datasets.config.DOWNLOADED_DATASETS_PATH""" , str(_SCREAMING_SNAKE_CASE ) )
lowerCAmelCase = test_hf_datasets_cache / """downloads""" / """extracted"""
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(_SCREAMING_SNAKE_CASE ) )
@pytest.fixture(autouse=_SCREAMING_SNAKE_CASE , scope="""session""" )
def _snake_case ( ) -> Optional[Any]:
"""simple docstring"""
datasets.disable_progress_bar()
@pytest.fixture(autouse=_SCREAMING_SNAKE_CASE )
def _snake_case ( _SCREAMING_SNAKE_CASE : Any ) -> Optional[int]:
"""simple docstring"""
# don't take tests into account when counting downloads
monkeypatch.setattr("""datasets.config.HF_UPDATE_DOWNLOAD_COUNTS""" , _SCREAMING_SNAKE_CASE )
@pytest.fixture
def _snake_case ( _SCREAMING_SNAKE_CASE : List[str] ) -> str:
"""simple docstring"""
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 is supported
monkeypatch.setattr("""sqlalchemy.util.deprecations.SILENCE_UBER_WARNING""" , _SCREAMING_SNAKE_CASE )
| 187
| 1
|
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
__lowercase = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
__lowercase = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
__lowercase = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__( datasets.Metric ):
'''simple docstring'''
def a_ ( self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32"""),
"""references""": datasets.Value("""int32"""),
}) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None):
"""simple docstring"""
return {
"matthews_correlation": float(matthews_corrcoef(__lowerCAmelCase , __lowerCAmelCase , sample_weight=__lowerCAmelCase)),
}
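# Behind the sklearn call, the binary-case MCC reduces to
# (TP*TN - FP*FN) / sqrt((TP+FP) * (TP+FN) * (TN+FP) * (TN+FN)).
# A minimal sketch of that formula with made-up counts; sklearn's convention of returning 0
# when the denominator vanishes is kept.
from math import sqrt

def mcc_binary_sketch(tp, tn, fp, fn):
    denom = sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return (tp * tn - fp * fn) / denom if denom else 0.0

print(round(mcc_binary_sketch(tp=4, tn=3, fp=1, fn=2), 2))  # 0.41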
| 272
|
'''simple docstring'''
from math import sqrt
def snake_case__ ( _A: int = 1000000 ) -> int:
'''simple docstring'''
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = 42
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_A , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f'{solution() = }')
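# The loop above exploits the fact that the shortest surface path across an a x b x c cuboid
# (a <= b <= c) unfolds to sqrt((a + b)**2 + c**2). The helper below counts the integer-path
# cuboids for a fixed maximum side M as an independent cross-check; per the Project Euler 86
# problem statement, M = 99 yields 1975 such cuboids.
from math import isqrt

def count_integer_cuboids(m):
    total = 0
    for c in range(1, m + 1):
        for ab in range(2, 2 * c + 1):  # ab = a + b with 1 <= a <= b <= c
            d2 = ab * ab + c * c
            if isqrt(d2) ** 2 == d2:
                total += ab // 2 - max(1, ab - c) + 1
    return total

print(count_integer_cuboids(99))  # 1975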
| 272
| 1
|
'''simple docstring'''
def UpperCAmelCase_ (__a : float , __a : int ):
"""simple docstring"""
if digit_amount > 0:
return round(number - int(__a ) , __a )
return number - int(__a )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 5
|
'''simple docstring'''
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : List[Any] = 0
# if input_string is "aba" than new_input_string become "a|b|a"
_a : Optional[int] = ''
_a : List[str] = ''
    # append each character followed by "|" to new_input_string, for indices 0 to length-2
for i in input_string[: len(__a ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
    # we will store the start and end of the previously found furthest-ending palindromic
    # substring
_a, _a : Optional[int] = 0, 0
    # length[i] holds the length of the palindromic substring centered at i
_a : Optional[Any] = [1 for i in range(len(__a ) )]
# for each character in new_string find corresponding palindromic string
_a : Dict = 0
for j in range(len(__a ) ):
_a : Dict = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(__a )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
_a : Optional[int] = 2 * k - 1
        # does this palindrome end after the previously explored right edge (that is, r)?
        # if yes, update r to the last index of this palindrome
if j + k - 1 > r:
_a : str = j - k + 1 # noqa: E741
_a : Any = j + k - 1
# update max_length and start position
if max_length < length[j]:
_a : Union[str, Any] = length[j]
_a : List[str] = j
    # build the answer string from the computed center and length
_a : Tuple = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
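# A compact, self-contained restatement of the mirror trick described in the comments above;
# the names are this sketch's own assumptions, and the separator scheme matches the
# interior-"|" interleaving used by the function in this file.
def longest_palindrome_sketch(s: str) -> str:
    if not s:
        return s
    t = "|".join(s)  # interleave separators so even- and odd-length cases are uniform
    n = len(t)
    radius = [0] * n  # radius[i]: half-length of the palindrome centred at i
    center = right = best = 0  # furthest-reaching palindrome seen so far
    for i in range(n):
        if i < right:
            radius[i] = min(right - i, radius[2 * center - i])  # mirror of i about center
        while (
            i - radius[i] - 1 >= 0
            and i + radius[i] + 1 < n
            and t[i - radius[i] - 1] == t[i + radius[i] + 1]
        ):
            radius[i] += 1
        if i + radius[i] > right:
            center, right = i, i + radius[i]
        if radius[i] > radius[best]:
            best = i
    return t[best - radius[best] : best + radius[best] + 1].replace("|", "")

print(longest_palindrome_sketch("abacab"))  # bacab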
| 5
| 1
|
def snake_case_ ( snake_case ) -> str:
return "".join([hex(snake_case )[2:].zfill(2 ).upper() for byte in list(snake_case )] )
def snake_case_ ( snake_case ) -> bytes:
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(snake_case ) % 2) != 0:
raise ValueError(
'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(snake_case ) <= set('0123456789ABCDEF' ):
raise ValueError(
'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, join the results together into bytes, and return them.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(snake_case ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
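# A cross-check of the two-hex-digits-per-byte rule using the standard library's Base16
# codec; the payload here is made up.
import base64

payload = b"Hello"
encoded = base64.b16encode(payload).decode("ascii")
assert encoded == "48656C6C6F"               # each byte becomes exactly two uppercase hex digits
assert base64.b16decode(encoded) == payload  # b16decode also insists on uppercase, per RFC 3548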
| 196
|
import unittest
from knapsack import knapsack as k
class __a ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
lowercase__: List[Any] = 0
lowercase__: List[Any] = [0]
lowercase__: str = [0]
lowercase__: Tuple = len(lowerCAmelCase__ )
self.assertEqual(k.knapsack(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , 0 )
lowercase__: Optional[Any] = [60]
lowercase__: Dict = [10]
lowercase__: str = len(lowerCAmelCase__ )
self.assertEqual(k.knapsack(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , 0 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase__: Union[str, Any] = 3
lowercase__: List[str] = [1, 2, 3]
lowercase__: Union[str, Any] = [3, 2, 1]
lowercase__: Union[str, Any] = len(lowerCAmelCase__ )
self.assertEqual(k.knapsack(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , 5 )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase__: Optional[Any] = 50
lowercase__: str = [60, 100, 120]
lowercase__: Any = [10, 20, 30]
lowercase__: List[Any] = len(lowerCAmelCase__ )
self.assertEqual(k.knapsack(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , 220 )
if __name__ == "__main__":
unittest.main()
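# A minimal recursive 0/1 knapsack consistent with the expected values asserted above; this
# is a reference helper written for illustration, not the `knapsack` module under test.
def knapsack_sketch(capacity, weights, values, n):
    if n == 0 or capacity == 0:
        return 0
    if weights[n - 1] > capacity:  # item n does not fit: skip it
        return knapsack_sketch(capacity, weights, values, n - 1)
    return max(
        values[n - 1] + knapsack_sketch(capacity - weights[n - 1], weights, values, n - 1),
        knapsack_sketch(capacity, weights, values, n - 1),
    )

print(knapsack_sketch(50, [10, 20, 30], [60, 100, 120], 3))  # 220, matching the last test case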
| 196
| 1
|
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class __snake_case :
def __init__( self : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any]=0.2 , _UpperCAmelCase : str=0.2 ) -> Union[str, Any]:
'''simple docstring'''
_lowerCAmelCase : Tuple = bp_numa
_lowerCAmelCase : Tuple = bp_numa
_lowerCAmelCase : Any = bp_numa
_lowerCAmelCase : List[str] = conva_get[:2]
_lowerCAmelCase : Tuple = conva_get[2]
_lowerCAmelCase : List[Any] = size_pa
_lowerCAmelCase : Union[str, Any] = rate_w
_lowerCAmelCase : Tuple = rate_t
_lowerCAmelCase : List[str] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
_lowerCAmelCase : Any = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
_lowerCAmelCase : Any = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
_lowerCAmelCase : List[str] = -2 * np.random.rand(self.conva[1] ) + 1
_lowerCAmelCase : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1
_lowerCAmelCase : str = -2 * np.random.rand(self.num_bpa ) + 1
def SCREAMING_SNAKE_CASE ( self : str , _UpperCAmelCase : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = {
"""num_bp1""": self.num_bpa,
"""num_bp2""": self.num_bpa,
"""num_bp3""": self.num_bpa,
"""conv1""": self.conva,
"""step_conv1""": self.step_conva,
"""size_pooling1""": self.size_poolinga,
"""rate_weight""": self.rate_weight,
"""rate_thre""": self.rate_thre,
"""w_conv1""": self.w_conva,
"""wkj""": self.wkj,
"""vji""": self.vji,
"""thre_conv1""": self.thre_conva,
"""thre_bp2""": self.thre_bpa,
"""thre_bp3""": self.thre_bpa,
}
with open(lowercase__ , """wb""" ) as f:
pickle.dump(lowercase__ , lowercase__ )
print(f"Model saved: {save_path}" )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Any , _UpperCAmelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
with open(lowercase__ , """rb""" ) as f:
_lowerCAmelCase : Optional[Any] = pickle.load(lowercase__ ) # noqa: S301
_lowerCAmelCase : List[str] = model_dic.get("""conv1""" )
conv_get.append(model_dic.get("""step_conv1""" ) )
_lowerCAmelCase : Optional[int] = model_dic.get("""size_pooling1""" )
_lowerCAmelCase : Optional[int] = model_dic.get("""num_bp1""" )
_lowerCAmelCase : Tuple = model_dic.get("""num_bp2""" )
_lowerCAmelCase : str = model_dic.get("""num_bp3""" )
_lowerCAmelCase : List[str] = model_dic.get("""rate_weight""" )
_lowerCAmelCase : Any = model_dic.get("""rate_thre""" )
# create model instance
_lowerCAmelCase : Dict = CNN(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
        # modify model parameters
_lowerCAmelCase : int = model_dic.get("""w_conv1""" )
_lowerCAmelCase : Optional[int] = model_dic.get("""wkj""" )
_lowerCAmelCase : int = model_dic.get("""vji""" )
_lowerCAmelCase : Any = model_dic.get("""thre_conv1""" )
_lowerCAmelCase : Any = model_dic.get("""thre_bp2""" )
_lowerCAmelCase : str = model_dic.get("""thre_bp3""" )
return conv_ins
def SCREAMING_SNAKE_CASE ( self : Dict , _UpperCAmelCase : str ) -> Tuple:
'''simple docstring'''
return 1 / (1 + np.exp(-1 * x ))
def SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCAmelCase : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return round(lowercase__ , 3 )
def SCREAMING_SNAKE_CASE ( self : Any , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] ) -> List[str]:
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = convs[0]
_lowerCAmelCase : int = convs[1]
_lowerCAmelCase : Union[str, Any] = np.shape(lowercase__ )[0]
# get the data slice of original image data, data_focus
_lowerCAmelCase : Any = []
for i_focus in range(0 , size_data - size_conv + 1 , lowercase__ ):
for j_focus in range(0 , size_data - size_conv + 1 , lowercase__ ):
                _lowerCAmelCase : Union[str, Any] = data[i_focus : i_focus + size_conv, j_focus : j_focus + size_conv]
data_focus.append(lowercase__ )
        # calculate the feature map of every single kernel, and save it as a list of matrices
_lowerCAmelCase : Dict = []
_lowerCAmelCase : List[Any] = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(lowercase__ ):
_lowerCAmelCase : List[Any] = []
for i_focus in range(len(lowercase__ ) ):
_lowerCAmelCase : Optional[Any] = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(lowercase__ ) )
_lowerCAmelCase : int = np.asmatrix(lowercase__ ).reshape(
lowercase__ , lowercase__ )
data_featuremap.append(lowercase__ )
        # expand the data slice to one dimension
_lowerCAmelCase : Optional[int] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(lowercase__ ) )
_lowerCAmelCase : Optional[int] = np.asarray(lowercase__ )
return focus_list, data_featuremap
def SCREAMING_SNAKE_CASE ( self : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int]="average_pool" ) -> Dict:
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = len(featuremaps[0] )
_lowerCAmelCase : int = int(size_map / size_pooling )
_lowerCAmelCase : List[Any] = []
for i_map in range(len(lowercase__ ) ):
_lowerCAmelCase : List[Any] = featuremaps[i_map]
_lowerCAmelCase : List[str] = []
for i_focus in range(0 , lowercase__ , lowercase__ ):
for j_focus in range(0 , lowercase__ , lowercase__ ):
                    _lowerCAmelCase : Dict = feature_map[i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(lowercase__ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(lowercase__ ) )
_lowerCAmelCase : Any = np.asmatrix(lowercase__ ).reshape(lowercase__ , lowercase__ )
featuremap_pooled.append(lowercase__ )
return featuremap_pooled
def SCREAMING_SNAKE_CASE ( self : int , _UpperCAmelCase : Dict ) -> List[Any]:
'''simple docstring'''
_lowerCAmelCase : List[Any] = []
for i in range(len(lowercase__ ) ):
_lowerCAmelCase : Any = np.shape(data[i] )
_lowerCAmelCase : str = data[i].reshape(1 , shapes[0] * shapes[1] )
_lowerCAmelCase : str = data_listed.getA().tolist()[0]
data_expanded.extend(lowercase__ )
_lowerCAmelCase : Any = np.asarray(lowercase__ )
return data_expanded
def SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCAmelCase : Union[str, Any] ) -> str:
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = np.asarray(lowercase__ )
_lowerCAmelCase : Tuple = np.shape(lowercase__ )
_lowerCAmelCase : int = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def SCREAMING_SNAKE_CASE ( self : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : str ) -> Optional[int]:
'''simple docstring'''
_lowerCAmelCase : int = []
_lowerCAmelCase : Optional[int] = 0
for i_map in range(lowercase__ ):
_lowerCAmelCase : Dict = np.ones((size_map, size_map) )
for i in range(0 , lowercase__ , lowercase__ ):
for j in range(0 , lowercase__ , lowercase__ ):
                    _lowerCAmelCase : Any = pd_pool[i_pool]
_lowerCAmelCase : Dict = i_pool + 1
_lowerCAmelCase : List[Any] = np.multiply(
lowercase__ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(lowercase__ )
return pd_all
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int , _UpperCAmelCase : str=bool ) -> Union[str, Any]:
'''simple docstring'''
print("""----------------------Start Training-------------------------""" )
print((""" - - Shape: Train_Data """, np.shape(lowercase__ )) )
print((""" - - Shape: Teach_Data """, np.shape(lowercase__ )) )
_lowerCAmelCase : int = 0
_lowerCAmelCase : str = []
_lowerCAmelCase : List[str] = 1_0000
while rp < n_repeat and mse >= error_accuracy:
_lowerCAmelCase : int = 0
print(f"-------------Learning Time {rp}--------------" )
for p in range(len(lowercase__ ) ):
# print('------------Learning Image: %d--------------'%p)
_lowerCAmelCase : Optional[int] = np.asmatrix(datas_train[p] )
_lowerCAmelCase : Any = np.asarray(datas_teach[p] )
_lowerCAmelCase , _lowerCAmelCase : Tuple = self.convolute(
lowercase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_lowerCAmelCase : Optional[Any] = self.pooling(lowercase__ , self.size_poolinga )
_lowerCAmelCase : List[Any] = np.shape(lowercase__ )
_lowerCAmelCase : Optional[Any] = self._expand(lowercase__ )
_lowerCAmelCase : Any = data_bp_input
_lowerCAmelCase : Optional[int] = np.dot(lowercase__ , self.vji.T ) - self.thre_bpa
_lowerCAmelCase : Optional[Any] = self.sig(lowercase__ )
_lowerCAmelCase : Tuple = np.dot(lowercase__ , self.wkj.T ) - self.thre_bpa
_lowerCAmelCase : int = self.sig(lowercase__ )
                # --------------Model Learning------------------------
# calculate error and gradient---------------
_lowerCAmelCase : List[str] = np.multiply(
(data_teach - bp_outa) , np.multiply(lowercase__ , (1 - bp_outa) ) )
_lowerCAmelCase : Union[str, Any] = np.multiply(
np.dot(lowercase__ , self.wkj ) , np.multiply(lowercase__ , (1 - bp_outa) ) )
_lowerCAmelCase : List[Any] = np.dot(lowercase__ , self.vji )
_lowerCAmelCase : Optional[Any] = pd_i_all / (self.size_poolinga * self.size_poolinga)
_lowerCAmelCase : int = pd_conva_pooled.T.getA().tolist()
_lowerCAmelCase : Optional[Any] = self._calculate_gradient_from_pool(
lowercase__ , lowercase__ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
_lowerCAmelCase : Optional[int] = self._expand_mat(pd_conva_all[k_conv] )
_lowerCAmelCase : int = self.rate_weight * np.dot(lowercase__ , lowercase__ )
_lowerCAmelCase : Tuple = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
_lowerCAmelCase : Union[str, Any] = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
_lowerCAmelCase : List[str] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
_lowerCAmelCase : Dict = self.vji + pd_j_all.T * bp_outa * self.rate_weight
_lowerCAmelCase : Tuple = self.thre_bpa - pd_k_all * self.rate_thre
_lowerCAmelCase : Tuple = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the summed error of this single image
_lowerCAmelCase : int = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
_lowerCAmelCase : Optional[int] = rp + 1
_lowerCAmelCase : Any = error_count / patterns
all_mse.append(lowercase__ )
def draw_error():
_lowerCAmelCase : Tuple = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(lowercase__ , """+-""" )
plt.plot(lowercase__ , """r--""" )
plt.xlabel("""Learning Times""" )
plt.ylabel("""All_mse""" )
plt.grid(lowercase__ , alpha=0.5 )
plt.show()
print("""------------------Training Complished---------------------""" )
print((""" - - Training epoch: """, rp, f" - - Mse: {mse:.6f}") )
if draw_e:
draw_error()
return mse
def SCREAMING_SNAKE_CASE ( self : str , _UpperCAmelCase : List[Any] ) -> Optional[int]:
'''simple docstring'''
_lowerCAmelCase : List[Any] = []
print("""-------------------Start Testing-------------------------""" )
print((""" - - Shape: Test_Data """, np.shape(lowercase__ )) )
for p in range(len(lowercase__ ) ):
_lowerCAmelCase : Optional[int] = np.asmatrix(datas_test[p] )
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.convolute(
lowercase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_lowerCAmelCase : Dict = self.pooling(lowercase__ , self.size_poolinga )
_lowerCAmelCase : Union[str, Any] = self._expand(lowercase__ )
_lowerCAmelCase : Optional[Any] = data_bp_input
_lowerCAmelCase : Optional[int] = bp_outa * self.vji.T - self.thre_bpa
_lowerCAmelCase : Union[str, Any] = self.sig(lowercase__ )
_lowerCAmelCase : str = bp_outa * self.wkj.T - self.thre_bpa
_lowerCAmelCase : Dict = self.sig(lowercase__ )
produce_out.extend(bp_outa.getA().tolist() )
_lowerCAmelCase : Optional[int] = [list(map(self.do_round , lowercase__ ) ) for each in produce_out]
return np.asarray(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int , _UpperCAmelCase : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_lowerCAmelCase : Any = np.asmatrix(lowercase__ )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.convolute(
lowercase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_lowerCAmelCase : int = self.pooling(lowercase__ , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
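# A minimal standalone sketch of the output-layer error term used in the
# training loop above: delta = (target - output) * output * (1 - output),
# the classic backpropagation delta rule for sigmoid units. Shapes are
# illustrative and the helper name is hypothetical, not part of the class.
def _sigmoid_output_delta(target, output):
    # error signal for a sigmoid output layer
    return np.multiply(target - output, np.multiply(output, 1 - output))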
| 358
|
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : List[str] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
_lowerCamelCase : Dict = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
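# How the "*" wildcard in the mapping above is resolved while loading weights
# (a minimal sketch of the loop logic further below; the example fairseq name
# is hypothetical):
#   fairseq name : "encoder.layers.3.self_attn.k_proj.weight"
#   mapping      : "self_attn.k_proj" -> "encoder.layers.*.attention.k_proj"
#   resolved key : "encoder.layers.3.attention.k_proj"
def _resolve_wildcard(mapped_key, name, key):
    layer_index = name.split(key)[0].split(".")[-2]
    return mapped_key.replace("*", layer_index)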
def _UpperCAmelCase (UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int ):
'''simple docstring'''
for attribute in key.split(""".""" ):
_lowerCAmelCase : Optional[int] = getattr(UpperCamelCase_ , UpperCamelCase_ )
if weight_type is not None:
_lowerCAmelCase : Optional[Any] = getattr(UpperCamelCase_ , UpperCamelCase_ ).shape
else:
_lowerCAmelCase : Any = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
_lowerCAmelCase : Tuple = value
elif weight_type == "weight_g":
_lowerCAmelCase : Optional[int] = value
elif weight_type == "weight_v":
_lowerCAmelCase : str = value
elif weight_type == "bias":
_lowerCAmelCase : str = value
else:
_lowerCAmelCase : Tuple = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def _UpperCAmelCase (UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] ):
'''simple docstring'''
_lowerCAmelCase : List[str] = []
_lowerCAmelCase : Tuple = fairseq_model.state_dict()
_lowerCAmelCase : str = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
_lowerCAmelCase : Any = None
for name, value in fairseq_dict.items():
_lowerCAmelCase : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , hf_model.config.feat_extract_norm == """group""" , )
_lowerCAmelCase : Any = True
elif name.split(""".""" )[0] == "proj":
_lowerCAmelCase : Union[str, Any] = fairseq_model.proj
_lowerCAmelCase : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
_lowerCAmelCase : Union[str, Any] = True
if "*" in mapped_key:
_lowerCAmelCase : Union[str, Any] = name.split(key )[0].split(""".""" )[-2]
_lowerCAmelCase : Optional[Any] = mapped_key.replace("""*""" , layer_index )
if "weight_g" in name:
_lowerCAmelCase : List[str] = """weight_g"""
elif "weight_v" in name:
_lowerCAmelCase : Tuple = """weight_v"""
elif "bias" in name:
_lowerCAmelCase : Dict = """bias"""
elif "weight" in name:
_lowerCAmelCase : Optional[int] = """weight"""
else:
_lowerCAmelCase : Optional[Any] = None
set_recursively(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
continue
if not is_used:
unused_weights.append(UpperCamelCase_ )
logger.warning(F"Unused weights: {unused_weights}" )
return proj_weight
def _UpperCAmelCase (UpperCamelCase_ : Any , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Tuple , UpperCamelCase_ : int ):
'''simple docstring'''
_lowerCAmelCase : List[str] = full_name.split("""conv_layers.""" )[-1]
_lowerCAmelCase : Optional[int] = name.split(""".""" )
_lowerCAmelCase : List[str] = int(items[0] )
_lowerCAmelCase : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
_lowerCAmelCase : List[Any] = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
_lowerCAmelCase : Any = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
_lowerCAmelCase : Tuple = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
_lowerCAmelCase : Union[str, Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(UpperCamelCase_ )
def _UpperCAmelCase (UpperCamelCase_ : int ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = emb.weight.shape
_lowerCAmelCase : Union[str, Any] = nn.Linear(UpperCamelCase_ , UpperCamelCase_ , bias=UpperCamelCase_ )
_lowerCAmelCase : List[Any] = emb.weight.data
return lin_layer
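# The helper above ties an embedding to an output projection: an Embedding of
# shape (vocab_size, emb_dim) becomes a bias-free Linear sharing the same
# weight matrix, so logits = hidden_states @ weight.T. Sketch (sizes are
# arbitrary, for illustration only):
#   emb = nn.Embedding(10, 4)
#   lin = nn.Linear(4, 10, bias=False)
#   lin.weight.data = emb.weight.data  # shared parameters, shape (10, 4)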
def _UpperCAmelCase (UpperCamelCase_ : Any ):
'''simple docstring'''
with open(UpperCamelCase_ , """r""" , encoding="""utf-8""" ) as f:
_lowerCAmelCase : Optional[int] = f.readlines()
_lowerCAmelCase : Dict = [line.split(""" """ )[0] for line in lines]
_lowerCAmelCase : Dict = len(UpperCamelCase_ )
_lowerCAmelCase : Optional[int] = {
"""<s>""": 0,
"""<pad>""": 1,
"""</s>""": 2,
"""<unk>""": 3,
}
vocab_dict.update(dict(zip(UpperCamelCase_ , range(4 , num_words + 4 ) ) ) )
return vocab_dict
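# create_vocab_dict expects a fairseq dict.txt with one "<token> <count>" pair
# per line; tokens get ids starting at 4, after the reserved specials. A
# hypothetical two-line dict file containing "the 1234" and ", 987" yields
# {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "the": 4, ",": 5}.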
@torch.no_grad()
def _UpperCAmelCase (UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] , ):
'''simple docstring'''
_lowerCAmelCase : Any = WavaVecaConfig.from_pretrained(UpperCamelCase_ )
_lowerCAmelCase : Union[str, Any] = SpeechaTextaConfig.from_pretrained(
UpperCamelCase_ , vocab_size=UpperCamelCase_ , decoder_layers=UpperCamelCase_ , do_stable_layer_norm=UpperCamelCase_ )
_lowerCAmelCase : List[str] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
_lowerCAmelCase : Tuple = model[0].eval()
# set weights for wav2vec2 encoder
_lowerCAmelCase : Union[str, Any] = WavaVecaModel(UpperCamelCase_ )
_lowerCAmelCase : Union[str, Any] = recursively_load_weights_wavaveca(model.encoder , UpperCamelCase_ )
_lowerCAmelCase : List[str] = SpeechaTextaForCausalLM(UpperCamelCase_ )
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=UpperCamelCase_ )
# set output linear layer
unexpected_keys.remove("""embed_out""" )
_lowerCAmelCase : Dict = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}" )
logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
_lowerCAmelCase : List[Any] = SpeechEncoderDecoderModel(encoder=UpperCamelCase_ , decoder=UpperCamelCase_ )
_lowerCAmelCase : Any = False
# add projection layer
_lowerCAmelCase : List[Any] = nn.Parameter(projection_layer.weight )
_lowerCAmelCase : Union[str, Any] = nn.Parameter(projection_layer.bias )
_lowerCAmelCase : Any = create_vocab_dict(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , """vocab.json""" ) , """w""" ) as fp:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
_lowerCAmelCase : Any = SpeechaTextaTokenizer(os.path.join(UpperCamelCase_ , """vocab.json""" ) )
tokenizer.save_pretrained(UpperCamelCase_ )
_lowerCAmelCase : str = hf_wavavec.config.to_dict()
_lowerCAmelCase : Any = tokenizer.pad_token_id
_lowerCAmelCase : List[str] = tokenizer.bos_token_id
_lowerCAmelCase : Any = tokenizer.eos_token_id
_lowerCAmelCase : Union[str, Any] = """speech_to_text_2"""
_lowerCAmelCase : Any = """wav2vec2"""
_lowerCAmelCase : str = SpeechEncoderDecoderConfig.from_dict(UpperCamelCase_ )
hf_wavavec.save_pretrained(UpperCamelCase_ )
feature_extractor.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
_lowerCamelCase : int = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=1_0_2_2_4, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
_lowerCamelCase : Optional[Any] = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
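# Example invocation with the flags defined above (the script name and all
# paths are placeholders):
#   python convert_wav2vec2_seq2seq.py \
#     --checkpoint_path /path/to/fairseq_checkpoint.pt \
#     --pytorch_dump_folder_path ./converted-model \
#     --dict_path /path/to/dict.ltr.txt \
#     --vocab_size 10224 \
#     --num_decoder_layers 7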
| 159
| 0
|
'''simple docstring'''
import argparse
from collections import defaultdict
def lowercase__( __UpperCamelCase: List[Any] ,__UpperCamelCase: List[Any] ,__UpperCamelCase: Dict ,__UpperCamelCase: Optional[int] ,__UpperCamelCase: List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = f"{file}_{class_name}_{test_name}"
done_test[_id] += 1
with open(_lowercase ,'r' ) as f:
SCREAMING_SNAKE_CASE : Optional[Any] = f.readlines()
SCREAMING_SNAKE_CASE : Optional[Any] = f"class {class_name}("
SCREAMING_SNAKE_CASE : Tuple = f"{4 * ' '}def {test_name}("
SCREAMING_SNAKE_CASE : Union[str, Any] = f"{8 * ' '}{correct_line.split()[0]}"
SCREAMING_SNAKE_CASE : Any = f"{16 * ' '}{correct_line.split()[0]}"
SCREAMING_SNAKE_CASE : Optional[Any] = False
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : str = 0
SCREAMING_SNAKE_CASE : Optional[Any] = []
for line in lines:
if line.startswith(_lowercase ):
SCREAMING_SNAKE_CASE : List[str] = True
elif in_class and line.startswith(_lowercase ):
SCREAMING_SNAKE_CASE : Optional[Any] = True
elif in_class and in_func and (line.startswith(_lowercase ) or line.startswith(_lowercase )):
SCREAMING_SNAKE_CASE : Union[str, Any] = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
SCREAMING_SNAKE_CASE : Optional[Any] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
SCREAMING_SNAKE_CASE : Dict = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f"{spaces * ' '}{correct_line}" )
SCREAMING_SNAKE_CASE : Optional[Any] = False
else:
new_lines.append(_lowercase )
with open(_lowercase ,'w' ) as f:
for line in new_lines:
f.write(_lowercase )
def lowercase__( __UpperCamelCase: Tuple ,__UpperCamelCase: List[str]=None ):
"""simple docstring"""
if fail is not None:
with open(_lowercase ,'r' ) as f:
SCREAMING_SNAKE_CASE : Union[str, Any] = {l.strip() for l in f.readlines()}
else:
SCREAMING_SNAKE_CASE : List[str] = None
with open(_lowercase ,'r' ) as f:
SCREAMING_SNAKE_CASE : List[Any] = f.readlines()
SCREAMING_SNAKE_CASE : int = defaultdict(_lowercase )
for line in correct_lines:
SCREAMING_SNAKE_CASE : str = line.split(';' )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(_lowercase ,_lowercase ,_lowercase ,_lowercase ,_lowercase )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("--correct_filename", help="filename of tests with expected result")
parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
UpperCamelCase_ = parser.parse_args()
main(args.correct_filename, args.fail_filename)
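# The correct_filename consumed above is expected to hold one semicolon-
# separated record per line, naming the file, class, and test to patch plus
# the corrected source line (format inferred from the split(';') call; the
# values here are hypothetical):
#   tests/test_modeling_foo.py;FooModelTest;test_forward;self.assertEqual(a, b)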
| 251
|
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class snake_case__ (datasets.BuilderConfig ):
"""simple docstring"""
__lowerCAmelCase :Optional[datasets.Features] = None
class snake_case__ (datasets.ArrowBasedBuilder ):
"""simple docstring"""
__lowerCAmelCase :Dict = PandasConfig
def SCREAMING_SNAKE_CASE__( self ) -> Tuple:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> Tuple:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
a__ : str = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__lowercase , (str, list, tuple) ):
a__ : Optional[int] = data_files
if isinstance(__lowercase , __lowercase ):
a__ : List[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
a__ : str = [dl_manager.iter_files(__lowercase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
a__ : List[str] = []
for split_name, files in data_files.items():
if isinstance(__lowercase , __lowercase ):
a__ : Union[str, Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
a__ : Dict = [dl_manager.iter_files(__lowercase ) for file in files]
splits.append(datasets.SplitGenerator(name=__lowercase , gen_kwargs={"""files""": files} ) )
return splits
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> pa.Table:
"""simple docstring"""
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
a__ : Tuple = table_cast(__lowercase , self.config.features.arrow_schema )
return pa_table
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> List[Any]:
"""simple docstring"""
for i, file in enumerate(itertools.chain.from_iterable(__lowercase ) ):
with open(__lowercase , """rb""" ) as f:
a__ : str = pa.Table.from_pandas(pd.read_pickle(__lowercase ) )
yield i, self._cast_table(__lowercase )
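# Minimal round trip with this builder's reading path (file name and frame
# contents are illustrative): each data file is a pickled DataFrame, loaded
# back with pd.read_pickle and wrapped as an Arrow table.
#   pd.DataFrame({"a": [1, 2]}).to_pickle("train.pkl")
#   pa.Table.from_pandas(pd.read_pickle("train.pkl"))  # as in _generate_tables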
| 170
| 0
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> Dict:
_a : int = tempfile.mkdtemp()
_a : List[Any] = BlipImageProcessor()
_a : Optional[Any] = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-BertModel''' )
_a : Tuple = BlipProcessor(_a , _a )
processor.save_pretrained(self.tmpdirname )
def __lowercase ( self , **_a ) -> List[Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).tokenizer
def __lowercase ( self , **_a ) -> Dict:
return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).image_processor
def __lowercase ( self ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def __lowercase ( self ) -> List[Any]:
_a : Union[str, Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
_a : List[str] = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowercase ( self ) -> Tuple:
_a : List[Any] = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_a : List[Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_a : Union[str, Any] = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
_a : Optional[Any] = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __lowercase ( self ) -> Union[str, Any]:
_a : str = self.get_image_processor()
_a : Union[str, Any] = self.get_tokenizer()
_a : Dict = BlipProcessor(tokenizer=_a , image_processor=_a )
_a : Union[str, Any] = self.prepare_image_inputs()
_a : str = image_processor(_a , return_tensors='''np''' )
_a : Tuple = processor(images=_a , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowercase ( self ) -> Tuple:
_a : List[Any] = self.get_image_processor()
_a : Optional[int] = self.get_tokenizer()
_a : Tuple = BlipProcessor(tokenizer=_a , image_processor=_a )
_a : List[str] = '''lower newer'''
_a : Dict = processor(text=_a )
_a : str = tokenizer(_a , return_token_type_ids=_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowercase ( self ) -> Any:
_a : Union[str, Any] = self.get_image_processor()
_a : List[str] = self.get_tokenizer()
_a : Optional[int] = BlipProcessor(tokenizer=_a , image_processor=_a )
_a : List[Any] = '''lower newer'''
_a : Tuple = self.prepare_image_inputs()
_a : Tuple = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __lowercase ( self ) -> str:
_a : List[str] = self.get_image_processor()
_a : Tuple = self.get_tokenizer()
_a : List[Any] = BlipProcessor(tokenizer=_a , image_processor=_a )
_a : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_a : str = processor.batch_decode(_a )
_a : Optional[Any] = tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def __lowercase ( self ) -> List[Any]:
_a : str = self.get_image_processor()
_a : Union[str, Any] = self.get_tokenizer()
_a : str = BlipProcessor(tokenizer=_a , image_processor=_a )
_a : str = '''lower newer'''
_a : Tuple = self.prepare_image_inputs()
_a : str = processor(text=_a , images=_a )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
| 15
|
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
a__ = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
a__ = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
a__ = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGLUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
- \'matthews_correlation\': Matthews Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def __UpperCAmelCase ( __a : int ,__a : List[str] ) -> Optional[Any]:
"""simple docstring"""
return float((preds == labels).mean() )
def __UpperCAmelCase ( __a : List[Any] ,__a : Union[str, Any] ,__a : List[str]="binary" ) -> Optional[int]:
"""simple docstring"""
_a : List[str] = simple_accuracy(__a ,__a )
_a : Any = float(fa_score(y_true=__a ,y_pred=__a ,average=__a ) )
return {
"accuracy": acc,
"f1": fa,
}
def __UpperCAmelCase ( __a : Optional[Any] ,__a : str ) -> List[Any]:
"""simple docstring"""
_a : Union[str, Any] = {}
for id_pred, label in zip(__a ,__a ):
_a : Optional[int] = F"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"""
_a : Optional[Any] = id_pred['''prediction''']
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
_a : str = [(pred, label)]
_a , _a : Any = [], []
for question, preds_labels in question_map.items():
_a , _a : Any = zip(*__a )
_a : List[Any] = fa_score(y_true=__a ,y_pred=__a ,average='''macro''' )
fas.append(__a )
_a : List[str] = int(sum(pred == label for pred, label in preds_labels ) == len(__a ) )
ems.append(__a )
_a : List[str] = float(sum(__a ) / len(__a ) )
_a : str = sum(__a ) / len(__a )
_a : Optional[int] = float(fa_score(y_true=__a ,y_pred=[id_pred['''prediction'''] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self ) -> List[Any]:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
def __lowercase ( self ) -> Any:
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
def __lowercase ( self , _a , _a ) -> Optional[Any]:
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(_a , _a )}
elif self.config_name == "cb":
return acc_and_fa(_a , _a , fa_avg='''macro''' )
elif self.config_name == "record":
_a : Any = [
{
'''qas''': [
{'''id''': ref['''idx''']['''query'''], '''answers''': [{'''text''': ans} for ans in ref['''answers''']]}
for ref in references
]
}
]
_a : Any = {pred['''idx''']['''query''']: pred['''prediction_text'''] for pred in predictions}
return evaluate_record(_a , _a )[0]
elif self.config_name == "multirc":
return evaluate_multirc(_a , _a )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(_a , _a )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
| 15
| 1
|
import random
def UpperCamelCase_( _snake_case : int , _snake_case : float , _snake_case : bool = False ):
"""simple docstring"""
__a ={i: [] for i in range(_snake_case )}
# if probability is greater than or equal to 1, generate a complete graph
if probability >= 1:
return complete_graph(_snake_case )
# if probability is less than or equal to 0, return a graph without edges
if probability <= 0:
return graph
# for each pair of nodes (i, j), add an edge from i to j
# if the randomly generated number is lower than probability
for i in range(_snake_case ):
for j in range(i + 1 , _snake_case ):
if random.random() < probability:
graph[i].append(_snake_case )
if not directed:
# if the graph is undirected, also add the reverse edge from j to i
graph[j].append(_snake_case )
return graph
def UpperCamelCase_( _snake_case : int ):
"""simple docstring"""
return {
i: [j for j in range(_snake_case ) if i != j] for i in range(_snake_case )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
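# Output shape of the two generators above (edges in the random case depend
# on the RNG; the dicts below are illustrative):
#   random graph,   n=4, p=0.5 -> e.g. {0: [1, 3], 1: [0], 2: [], 3: [0]}
#   complete graph, n=3        -> {0: [1, 2], 1: [0, 2], 2: [0, 1]}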
| 218
|
from ... import PretrainedConfig
_lowerCAmelCase : Any = {
"sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
SCREAMING_SNAKE_CASE = 'nezha'
def __init__( self , __snake_case=2_1128 , __snake_case=768 , __snake_case=12 , __snake_case=12 , __snake_case=3072 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=512 , __snake_case=64 , __snake_case=2 , __snake_case=0.02 , __snake_case=1e-12 , __snake_case=0.1 , __snake_case=0 , __snake_case=2 , __snake_case=3 , __snake_case=True , **__snake_case , ) -> str:
'''simple docstring'''
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
__a =vocab_size
__a =hidden_size
__a =num_hidden_layers
__a =num_attention_heads
__a =hidden_act
__a =intermediate_size
__a =hidden_dropout_prob
__a =attention_probs_dropout_prob
__a =max_position_embeddings
__a =max_relative_position
__a =type_vocab_size
__a =initializer_range
__a =layer_norm_eps
__a =classifier_dropout
__a =use_cache
| 218
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__A : Any = logging.get_logger(__name__)
__A : int = '''▁'''
__A : str = {'''vocab_file''': '''sentencepiece.bpe.model'''}
__A : Dict = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
}
}
__A : Dict = {
'''facebook/mbart-large-en-ro''': 1024,
'''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
__A : Optional[int] = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class lowerCamelCase ( _UpperCAmelCase ):
lowercase : List[Any] = VOCAB_FILES_NAMES
lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] = PRETRAINED_VOCAB_FILES_MAP
lowercase : Any = ['input_ids', 'attention_mask']
lowercase : List[int] = []
lowercase : List[int] = []
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="<unk>" , SCREAMING_SNAKE_CASE_="<pad>" , SCREAMING_SNAKE_CASE_="<mask>" , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ):
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase : int = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
UpperCamelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , src_lang=SCREAMING_SNAKE_CASE_ , tgt_lang=SCREAMING_SNAKE_CASE_ , additional_special_tokens=SCREAMING_SNAKE_CASE_ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE_ , )
UpperCamelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
UpperCamelCase : int = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCamelCase : str = 1
UpperCamelCase : List[str] = len(self.sp_model )
UpperCamelCase : List[str] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(SCREAMING_SNAKE_CASE_ )
}
UpperCamelCase : Dict = {v: k for k, v in self.lang_code_to_id.items()}
UpperCamelCase : Union[str, Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
UpperCamelCase : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
UpperCamelCase : Tuple = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
UpperCamelCase : List[Any] = src_lang if src_lang is not None else """en_XX"""
UpperCamelCase : str = self.lang_code_to_id[self._src_lang]
UpperCamelCase : Union[str, Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
UpperCamelCase : str = self.__dict__.copy()
UpperCamelCase : List[Any] = None
UpperCamelCase : int = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
UpperCamelCase : Any = {}
UpperCamelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def a_ ( self ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def a_ ( self ):
return self._src_lang
@src_lang.setter
def a_ ( self , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = [1] * len(self.prefix_tokens )
UpperCamelCase : Union[str, Any] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(SCREAMING_SNAKE_CASE_ )) + suffix_ones
return prefix_ones + ([0] * len(SCREAMING_SNAKE_CASE_ )) + ([0] * len(SCREAMING_SNAKE_CASE_ )) + suffix_ones
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
UpperCamelCase : Optional[int] = [self.sep_token_id]
UpperCamelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
UpperCamelCase : Any = src_lang
UpperCamelCase : Union[str, Any] = self(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = tgt_lang_id
return inputs
def a_ ( self ):
UpperCamelCase : Union[str, Any] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a_ ( self , SCREAMING_SNAKE_CASE_ ):
return self.sp_model.encode(SCREAMING_SNAKE_CASE_ , out_type=SCREAMING_SNAKE_CASE_ )
def a_ ( self , SCREAMING_SNAKE_CASE_ ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase : List[str] = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def a_ ( self , SCREAMING_SNAKE_CASE_ ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def a_ ( self , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : List[Any] = """""".join(SCREAMING_SNAKE_CASE_ ).replace(SCREAMING_SNAKE_CASE_ , """ """ ).strip()
return out_string
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
UpperCamelCase : List[str] = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE_ , """wb""" ) as fi:
UpperCamelCase : Dict = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = "en_XX" , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "ro_RO" , **SCREAMING_SNAKE_CASE_ , ):
UpperCamelCase : str = src_lang
UpperCamelCase : Optional[int] = tgt_lang
return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def a_ ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def a_ ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def a_ ( self , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Any = self.lang_code_to_id[src_lang]
UpperCamelCase : Dict = []
UpperCamelCase : List[Any] = [self.eos_token_id, self.cur_lang_code]
def a_ ( self , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : str = self.lang_code_to_id[lang]
UpperCamelCase : Union[str, Any] = []
UpperCamelCase : List[str] = [self.eos_token_id, self.cur_lang_code]
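# Id layout sketch for the tokenizer above: with fairseq_offset = 1, a
# regular sentencepiece id s maps to fairseq id s + 1; language codes are
# appended starting at len(sp_model) + fairseq_offset, and vocab_size adds
# one extra slot for <mask>. With a hypothetical 8-piece SPM model:
#   <s>=0, <pad>=1, </s>=2, <unk>=3, pieces up to id 8, ar_AR=9, cs_CZ=10, ...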
| 27
|
"""simple docstring"""
import torch
from transformers import AutoModel
class lowerCamelCase ( torch.nn.Module ):
def __init__( self , SCREAMING_SNAKE_CASE_="sayef/fsner-bert-base-uncased" ):
super(SCREAMING_SNAKE_CASE_ , self ).__init__()
UpperCamelCase : int = AutoModel.from_pretrained(SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = torch.nn.CosineSimilarity(3 , 1e-08 )
UpperCamelCase : Any = torch.nn.Softmax(dim=1 )
def a_ ( self , **SCREAMING_SNAKE_CASE_ ):
return self.bert(**SCREAMING_SNAKE_CASE_ ).last_hidden_state
def a_ ( self , SCREAMING_SNAKE_CASE_ ):
return token_embeddings.sum(2 , keepdim=SCREAMING_SNAKE_CASE_ )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=1 ):
return self.softmax(T * self.cos(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : List[str] = W_supports["""sizes"""].tolist()
UpperCamelCase : List[str] = W_supports["""start_token_id"""].item()
UpperCamelCase : List[Any] = W_supports["""end_token_id"""].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
UpperCamelCase : List[Any] = self.BERT(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = self.BERT(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = None
UpperCamelCase : Optional[Any] = None
UpperCamelCase : Tuple = W_supports["""input_ids"""] == start_token_id
UpperCamelCase : Optional[Any] = W_supports["""input_ids"""] == end_token_id
for i, size in enumerate(SCREAMING_SNAKE_CASE_ ):
if i == 0:
UpperCamelCase : int = 0
else:
UpperCamelCase : Optional[int] = support_sizes[i - 1]
UpperCamelCase : Tuple = S[s : s + size][start_token_masks[s : s + size]]
UpperCamelCase : int = S[s : s + size][end_token_masks[s : s + size]]
UpperCamelCase : Dict = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
UpperCamelCase : Tuple = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
UpperCamelCase : List[str] = torch.vstack((p_starts, p_start) )
UpperCamelCase : Optional[Any] = torch.vstack((p_ends, p_end) )
else:
UpperCamelCase : Optional[int] = p_start
UpperCamelCase : Tuple = p_end
return p_starts, p_ends
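# The span scoring above, in shapes: for each query item, q[i] is (L, D) over
# L query tokens, s_start / s_end are (K, D) over the support set's marked
# start / end tokens, and
#   p_start = softmax_over_L( (q[i] @ s_start.T).sum(dim=1) )
# i.e. each query position is scored by its summed similarity to all support
# start tokens. (L, D, K are illustrative names, not used in the code.)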
| 27
| 1
|
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _snake_case ( _a ):
_A : Tuple = (UniPCMultistepScheduler,)
_A : List[Any] = (('''num_inference_steps''', 2_5),)
def __UpperCamelCase ( self : Dict ,**SCREAMING_SNAKE_CASE__ : str ):
SCREAMING_SNAKE_CASE:Dict = {
"num_train_timesteps": 1_000,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"solver_type": "bh2",
}
config.update(**SCREAMING_SNAKE_CASE__ )
return config
def __UpperCamelCase ( self : str ,SCREAMING_SNAKE_CASE__ : List[str]=0 ,**SCREAMING_SNAKE_CASE__ : str ):
SCREAMING_SNAKE_CASE:Any = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE:Tuple = kwargs.pop("num_inference_steps" ,SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:str = self.dummy_sample
SCREAMING_SNAKE_CASE:Any = 0.1 * sample
SCREAMING_SNAKE_CASE:Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE:Optional[int] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Tuple = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE:int = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Dict = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE:Any = dummy_past_residuals[: new_scheduler.config.solver_order]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Dict = sample, sample
for t in range(SCREAMING_SNAKE_CASE__ ,time_step + scheduler.config.solver_order + 1 ):
SCREAMING_SNAKE_CASE:Union[str, Any] = scheduler.step(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ).prev_sample
SCREAMING_SNAKE_CASE:List[str] = new_scheduler.step(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __UpperCamelCase ( self : str ,SCREAMING_SNAKE_CASE__ : str=0 ,**SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
SCREAMING_SNAKE_CASE:Dict = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE:List[str] = kwargs.pop("num_inference_steps" ,SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:List[Any] = self.dummy_sample
SCREAMING_SNAKE_CASE:int = 0.1 * sample
SCREAMING_SNAKE_CASE:Any = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE:List[str] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE:str = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals (must be after setting timesteps)
SCREAMING_SNAKE_CASE:Optional[int] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:List[Any] = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residual (must be after setting timesteps)
SCREAMING_SNAKE_CASE:Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
SCREAMING_SNAKE_CASE:str = scheduler.step(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ).prev_sample
SCREAMING_SNAKE_CASE:List[str] = new_scheduler.step(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __UpperCamelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]=None ,**SCREAMING_SNAKE_CASE__ : Tuple ):
if scheduler is None:
SCREAMING_SNAKE_CASE:Tuple = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE:Optional[Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:str = scheduler_class(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE:str = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:List[str] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:int = 10
SCREAMING_SNAKE_CASE:Any = self.dummy_model()
SCREAMING_SNAKE_CASE:str = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE:List[str] = model(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:List[str] = scheduler.step(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ).prev_sample
return sample
def __UpperCamelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE:Optional[int] = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE:List[str] = kwargs.pop("num_inference_steps" ,SCREAMING_SNAKE_CASE__ )
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE:Tuple = self.get_scheduler_config()
SCREAMING_SNAKE_CASE:Union[str, Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:str = self.dummy_sample
SCREAMING_SNAKE_CASE:Union[str, Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE__ ,"set_timesteps" ):
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE__ ,"set_timesteps" ):
SCREAMING_SNAKE_CASE:Dict = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
SCREAMING_SNAKE_CASE:Tuple = [residual + 0.2, residual + 0.15, residual + 0.10]
SCREAMING_SNAKE_CASE:Dict = dummy_past_residuals[: scheduler.config.solver_order]
SCREAMING_SNAKE_CASE:List[str] = scheduler.timesteps[5]
SCREAMING_SNAKE_CASE:List[Any] = scheduler.timesteps[6]
SCREAMING_SNAKE_CASE:Dict = scheduler.step(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ).prev_sample
SCREAMING_SNAKE_CASE:Dict = scheduler.step(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def __UpperCamelCase ( self : str ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
SCREAMING_SNAKE_CASE:Optional[int] = UniPCMultistepScheduler(**self.get_scheduler_config() )
SCREAMING_SNAKE_CASE:str = self.full_loop(scheduler=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Tuple = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_mean.item() - 0.2_464 ) < 1e-3
SCREAMING_SNAKE_CASE:Union[str, Any] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE:str = DEISMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE:int = DPMSolverMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE:Tuple = UniPCMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE:Optional[int] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Dict = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_mean.item() - 0.2_464 ) < 1e-3
def __UpperCamelCase ( self : Optional[int] ):
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : List[str] ):
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE__ )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE__ ,prediction_type=SCREAMING_SNAKE_CASE__ ,sample_max_value=SCREAMING_SNAKE_CASE__ ,solver_order=SCREAMING_SNAKE_CASE__ ,solver_type=SCREAMING_SNAKE_CASE__ ,)
def __UpperCamelCase ( self : Union[str, Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : Tuple ):
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=SCREAMING_SNAKE_CASE__ ,solver_type=SCREAMING_SNAKE_CASE__ ,prediction_type=SCREAMING_SNAKE_CASE__ ,)
SCREAMING_SNAKE_CASE:Union[str, Any] = self.full_loop(
solver_order=SCREAMING_SNAKE_CASE__ ,solver_type=SCREAMING_SNAKE_CASE__ ,prediction_type=SCREAMING_SNAKE_CASE__ ,)
assert not torch.isnan(SCREAMING_SNAKE_CASE__ ).any(), "Samples contain NaN values"
def __UpperCamelCase ( self : Optional[int] ):
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE__ )
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : Dict ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE__ ,time_step=0 )
def __UpperCamelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE:Tuple = self.full_loop()
SCREAMING_SNAKE_CASE:Optional[int] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_mean.item() - 0.2_464 ) < 1e-3
def __UpperCamelCase ( self : int ):
SCREAMING_SNAKE_CASE:int = self.full_loop(prediction_type="v_prediction" )
SCREAMING_SNAKE_CASE:str = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_mean.item() - 0.1_014 ) < 1e-3
def __UpperCamelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE:Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE:List[str] = self.get_scheduler_config(thresholding=SCREAMING_SNAKE_CASE__ ,dynamic_thresholding_ratio=0 )
SCREAMING_SNAKE_CASE:List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Tuple = 10
SCREAMING_SNAKE_CASE:Dict = self.dummy_model()
SCREAMING_SNAKE_CASE:Dict = self.dummy_sample_deter.half()
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE:Tuple = model(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:int = scheduler.step(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ).prev_sample
assert sample.dtype == torch.floataa
def __UpperCamelCase ( self : Union[str, Any] ,**SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE:Optional[Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Dict = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 139
|
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
A_ = {
"gwf-440k": {
"url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
"sample_rate": 4_80_00,
"sample_size": 6_55_36,
},
"jmann-small-190k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
"sample_rate": 4_80_00,
"sample_size": 6_55_36,
},
"jmann-large-580k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
"sample_rate": 4_80_00,
"sample_size": 13_10_72,
},
"maestro-uncond-150k": {
"url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
"sample_rate": 1_60_00,
"sample_size": 6_55_36,
},
"unlocked-uncond-250k": {
"url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
"sample_rate": 1_60_00,
"sample_size": 6_55_36,
},
"honk-140k": {
"url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
"sample_rate": 1_60_00,
"sample_size": 6_55_36,
},
}
def A_ ( snake_case , snake_case ):
return torch.atana(snake_case , snake_case ) / math.pi * 2
def A_ ( snake_case ):
SCREAMING_SNAKE_CASE:List[Any] = torch.sin(t * math.pi / 2 ) ** 2
SCREAMING_SNAKE_CASE:Any = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(snake_case , snake_case )
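# The two helpers above implement the continuous (alpha, sigma) <-> t
# parameterization: t = (2 / pi) * atan2(sigma, alpha), with
# alpha**2 + sigma**2 = 1. The crash schedule first warps sigma to
# sin(t * pi / 2)**2, recomputes the matching alpha, and converts back to t.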
class _snake_case ( _a ):
pass
class _snake_case ( nn.Module ):
def __init__( self : int ,SCREAMING_SNAKE_CASE__ : str ):
super().__init__()
SCREAMING_SNAKE_CASE:List[Any] = DiffusionAttnUnetaD(SCREAMING_SNAKE_CASE__ ,n_attn_layers=4 )
SCREAMING_SNAKE_CASE:List[str] = deepcopy(self.diffusion )
SCREAMING_SNAKE_CASE:Dict = torch.quasirandom.SobolEngine(1 ,scramble=SCREAMING_SNAKE_CASE__ )
def A_ ( snake_case ):
SCREAMING_SNAKE_CASE:List[Any] = MODELS_MAP[model_name]["url"]
os.system(F'''wget {url} ./''' )
return F'''./{model_name}.ckpt'''
A_ = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
}
A_ = {
"8": "resnets.0",
"9": "attentions.0",
"10": "resnets.1",
"11": "attentions.1",
"12": "resnets.2",
"13": "attentions.2",
}
A_ = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
"8": "resnets.3",
"9": "attentions.3",
"10": "resnets.4",
"11": "attentions.4",
"12": "resnets.5",
"13": "attentions.5",
}
A_ = {
"0": "resnets.0",
"1": "resnets.1",
"2": "resnets.2",
"4": "resnets.0",
"5": "resnets.1",
"6": "resnets.2",
}
A_ = {
"skip": "conv_skip",
"main.0": "conv_1",
"main.1": "group_norm_1",
"main.3": "conv_2",
"main.4": "group_norm_2",
}
A_ = {
"norm": "group_norm",
"qkv_proj": ["query", "key", "value"],
"out_proj": ["proj_attn"],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string
    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()

        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
A_ = parser.parse_args()
main(args)
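# Example invocation (a sketch; the output directory name is arbitrary). When
# `--model_path` is not a local file it must be one of the MODELS_MAP keys above,
# in which case the checkpoint is fetched via `download` first:
#
#     python convert_dance_diffusion_to_diffusers.py \
#         --model_path gwf-440k \
#         --checkpoint_path ./gwf-440k-diffusers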
| 139
| 1
|
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """
    Formats a user-agent string with basic info about a request.
    """
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en",
            license="apache-2.0",
            library_name="diffusers",
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    """
    Extracts the commit hash from a resolved filename toward a cache file.
    """
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
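# For example (hypothetical path), a resolved cache file such as
#   ~/.cache/huggingface/diffusers/models--org--name/snapshots/abc123def/unet/config.json
# yields "abc123def", the snapshot directory name, as the commit hash.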
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
"existing cached models. This is a one-time operation, you can interrupt it or run it "
"later by calling `diffusers.utils.hub_utils.move_cache()`."
)
try:
move_cache()
except Exception as e:
__A = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
f'There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '
"file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
"message and we will do our best to help."
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, "w") as f:
f.write("1")
except Exception:
logger.warning(
f'There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '
"the directory exists and can be written to."
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)

    return weights_name
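# A quick illustration (not from the original file): with variant="fp16",
# "diffusion_pytorch_model.bin" becomes "diffusion_pytorch_model.fp16.bin".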
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
else:
raise EnvironmentError(
F"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(__a ).base_version ) >= version.parse("0.20.0" )
):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
return model_file
except: # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file

except RepositoryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
"listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
"token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
"login`." )
except RevisionNotFoundError:
raise EnvironmentError(
F"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
"this model name. Check the model page at "
F"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
except EntryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
except HTTPError as err:
raise EnvironmentError(
F"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
except ValueError:
raise EnvironmentError(
F"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
F""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
F""" directory containing a file named {weights_name} or"""
" \nCheckout your internet connection or see how to run the library in"
" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." )
except EnvironmentError:
raise EnvironmentError(
F"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
"'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
F"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
F"""containing a file named {weights_name}""" )
| 354
|
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Returns the mean of the distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Returns the variance of the distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Returns the standard deviation of the distribution."""
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]

        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of each individual event produced by the distributions this object constructs."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions, i.e. length of the `event_shape` tuple."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A valid numeric value for the support of the distribution; used when padding data series."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Return the projection layer that maps the input to the parameters of the distribution."""
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        """Converts arguments to the right shape and domain; implemented by subclasses."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        """Maps inputs to the positive orthant via the square-plus operation."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    # The negative binomial distribution returns integers, so we cannot scale it with an
    # affine transformation; instead we scale the parameters directly.
    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
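# Minimal usage sketch (not part of the original module; the feature size is made up):
# project a 32-dimensional decoder state to Student-T parameters and build the distribution.
#
#     output = StudentTOutput(dim=1)
#     proj = output.get_parameter_projection(in_features=32)
#     df, loc, scale = proj(torch.randn(8, 32))
#     distr = output.distribution((df, loc, scale))  # StudentT with batch shape (8,)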
| 273
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    """
    Constructs a Reformer tokenizer, backed by a SentencePiece model.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
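# Usage sketch (assumes a local SentencePiece model file; the path is illustrative):
#
#     tokenizer = ReformerTokenizer(vocab_file="./spiece.model")
#     ids = tokenizer("Crime and Punishment")["input_ids"]
#     text = tokenizer.decode(ids)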
| 239
|
'''simple docstring'''
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """
    Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)
    """
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """
    Req = R1 + R2 + ... + Rn
    """
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
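# Worked example (not part of the original file): resistors of 2 Ω and 4 Ω give
# resistor_series([2, 4]) == 6 and resistor_parallel([2, 4]) == 1 / (1/2 + 1/4) ≈ 1.333.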
| 80
| 0
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
_UpperCamelCase = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/led-base-16384""": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings, avoiding whitespace/control
    characters that the bpe code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


# Copied from transformers.models.bart.tokenization_bart.get_pairs
def get_pairs(word):
    """
    Return the set of symbol pairs in a word, where the word is represented as a
    tuple of (variable-length string) symbols.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
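# Usage note (illustrative, not from the original file): LED models take an extra
# `global_attention_mask` alongside `input_ids`. When that key is present in the
# encoded inputs, `_pad` above extends it with -1 entries so it stays the same
# length as `input_ids` (0 already means "local attention", so -1 marks padding).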
| 234
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
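# Note (illustrative): with the default offset of 103 and no user-supplied
# additional_special_tokens, the branch above reserves "<mask_1>" plus
# "<unk_2>" ... "<unk_102>" as additional special tokens, matching the vocabulary
# offset used by the original Pegasus checkpoints.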
| 234
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--gpt2_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
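# Example invocation (paths are placeholders, and the script filename is assumed):
#
#     python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#         --gpt2_checkpoint_path /path/to/tf_checkpoint \
#         --pytorch_dump_folder_path ./gpt2-pytorch
#
# Omitting --gpt2_config_file falls back to the default GPT2Config.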
| 53
|
'''simple docstring'''
from __future__ import annotations
class Matrix:
    """
    Matrix object built from a 2D list where each inner list is a row of int/float values.
    """

    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns(self) -> list[list[int]]:
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
    def num_rows(self) -> int:
return len(self.rows )
@property
    def num_columns(self) -> int:
return len(self.rows[0] )
@property
    def order(self) -> tuple[int, int]:
return (self.num_rows, self.num_columns)
@property
    def is_square(self) -> bool:
return self.order[0] == self.order[1]
    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)
    def determinant(self) -> int:
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
    def is_invertable(self) -> bool:
        return bool(self.determinant())
    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()
    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)
    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )
    def cofactors(self) -> Matrix:
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)
    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)
def __repr__( self : Optional[Any] ):
return str(self.rows )
def __str__( self : Union[str, Any] ):
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
                "[" + ". ".join([str(value) for value in row]) + ".]"
for row in self.rows
] )
+ "]"
)
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]
    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows
    def __ne__(self, other: object) -> bool:
return not self == other
def __neg__( self : List[Any] ):
return self * -1
    def __add__(self, other: Matrix) -> Matrix:
if self.order != other.order:
raise ValueError('Addition requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __sub__(self, other: Matrix) -> Matrix:
if self.order != other.order:
raise ValueError('Subtraction requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )
    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result
    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
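# Quick example (not part of the original file):
#
#     m = Matrix([[1, 2], [3, 4]])
#     m.determinant()    # 1 * 4 - 2 * 3 == -2
#     m.is_invertable()  # True
#     print(m.inverse())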
| 53
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
|
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """Text decoder: a GPT-2 language model conditioned on a projected prefix embedding."""

    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)
    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        # labels passed to the GPT-2 head must be int64
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)
    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        """Generate caption token ids given text embedding features."""
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def generate_beam(
        self,
        input_embeds=None,
        device=None,
        input_ids=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        """Beam-search decoding over the GPT-2 head."""
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
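

# Illustrative usage sketch (added; not part of the original module): a
# minimal, hedged example of driving the decoder above on CPU. The prefix
# shape and `eos_token_id=50256` are assumptions chosen for illustration;
# real values depend on the text encoder and GPT-2 tokenizer actually used.
if __name__ == "__main__":
    decoder = UniDiffuserTextDecoder(prefix_length=4, prefix_inner_dim=768)
    features = torch.randn(1, 4, 768)  # hypothetical prefix embeddings
    tokens, lengths = decoder.generate_captions(features, eos_token_id=50256, device="cpu")
    print(tokens.shape, lengths)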
|
def exchange_sort(numbers: list) -> list:
    """Sort a list in place by repeatedly exchanging out-of-order pairs.

    >>> exchange_sort([5, 4, 3, 2, 1])
    [1, 2, 3, 4, 5]
    >>> exchange_sort([])
    []
    """
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[j], numbers[i] = numbers[i], numbers[j]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
|
def prime_sieve_eratosthenes(num: int) -> list:
    """Return all primes up to and including ``num`` via the sieve of Eratosthenes.

    >>> prime_sieve_eratosthenes(10)
    [2, 3, 5, 7]
    >>> prime_sieve_eratosthenes(20)
    [2, 3, 5, 7, 11, 13, 17, 19]
    """
    if num <= 0:
        raise ValueError("Input must be a positive integer")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
|
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price including tax.

    >>> price_plus_tax(100, 0.25)
    125.0
    """
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
|
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
_UpperCAmelCase = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def UpperCamelCase ( __lowercase : str ,__lowercase : Dict=None ):
'''simple docstring'''
require_version(deps[pkg] ,__lowercase )
|
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of ``number``, rounded to ``digit_amount``
    digits when ``digit_amount`` is positive."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)


def get_default_vocab_list():
    """Return the default ESM-2 amino-acid vocabulary."""
    return (
        "<cls>", "<pad>", "<eos>", "<unk>",
        "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N",
        "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-",
        "<null_1>", "<mask>",
    )
|
"""Compare the sizes of two exponentials x1**y1 and x2**y2 using logarithms."""
import math


def res(x: int, y: int) -> float:
    if 0 not in (x, y):
        # We use the identity log10(x^y) = y * log10(x).
        return y * math.log10(x)
    if x == 0:  # 0 raised to any number is 0
        return 0
    if y == 0:
        return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two base/power pairs from input and typecast them to int using map.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
|
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
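
    # Added evaluation sketch (not part of the original script): report a
    # simple RMSE between the predicted and true windows. Both arrays are
    # still in the scaler's [0, 1] range, so this is a scaled-unit error.
    rmse = float(np.sqrt(np.mean((pred - y_test) ** 2)))
    print(f"Test RMSE (scaled units): {rmse:.6f}")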
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification
def get_swinv2_config(swinv2_name):
    config = Swinv2Config()
    name_split = swinv2_name.split("_")

    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()

    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name),
        organization="nandwalritik",
        commit_message="Add model",
    )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swinv2_name",
        default="swinv2_tiny_patch4_window8_256",
        type=str,
        help="Name of the Swinv2 timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
|
"""simple docstring"""
def reverse_long_words(sentence: str) -> str:
    """Reverse all words that are longer than 4 characters in a sentence.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
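

# Illustrative usage sketch (added; not part of the original module): the
# `attribute_map` above lets generic code read `hidden_size` and
# `num_hidden_layers` while the canonical CTRL names stay `n_embd`/`n_layer`.
if __name__ == "__main__":
    config = CTRLConfig(n_layer=2, n_head=4)
    print(config.hidden_size, config.num_hidden_layers)  # -> 1280 2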
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def electrical_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    """Solve for whichever of conductivity, electron concentration, or
    mobility is supplied as zero; exactly one of the three must be 0."""
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif conductivity < 0:
        raise ValueError("Conductivity cannot be negative")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative")
    elif mobility < 0:
        raise ValueError("mobility cannot be negative")
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
    """Decorator flagging a callable as experimental: calling it emits a warning."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
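

# Illustrative usage sketch (added; not part of the original module); the
# function `new_feature` below is hypothetical.
if __name__ == "__main__":

    @experimental
    def new_feature(x):
        return x * 2

    print(new_feature(21))  # emits a UserWarning, then prints 42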
|
'''simple docstring'''
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
|
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class DebugLauncherTester(unittest.TestCase):
    def test_debug_launcher_script(self):
        debug_launcher(test_script.main)

    def test_debug_launcher_ops(self):
        debug_launcher(test_ops.main)
|
"""simple docstring"""
def solution(power: int = 1000) -> int:
    """Return the sum of the digits of 2**power.

    >>> solution(1000)
    1366
    >>> solution(15)
    26
    """
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
|
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """Capitalize the first character of a sentence if it is a lowercase letter.

    >>> capitalize("hello world")
    'Hello world'
    >>> capitalize("123 hello world")
    '123 hello world'
    >>> capitalize("")
    ''
    """
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
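

# Illustrative usage sketch (added; not part of the original module): this
# packaged builder is what `datasets.load_dataset` dispatches to for pickled
# pandas DataFrames. The file path below is hypothetical.
#
#   from datasets import load_dataset
#   ds = load_dataset("pandas", data_files="data/train.pkl", split="train")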
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
"google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
"google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
"google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
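

# Illustrative usage sketch (added; not part of the original module): build a
# config and inspect the ONNX input spec and validation tolerance. This
# assumes the OnnxConfig subclass can be constructed directly from the model
# config with the default task.
if __name__ == "__main__":
    config = MobileNetV2Config(depth_multiplier=1.0)
    onnx_config = MobileNetV2OnnxConfig(config)
    print(onnx_config.inputs, onnx_config.atol_for_validation)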
|
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
|
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Optional[int] ) ->str:
'''simple docstring'''
_UpperCAmelCase : str = "this is a test"
_UpperCAmelCase : Any = "this is a test"
return input_text, output_text
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"])
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."],
        )
    @slow
    def test_tokenizer_integration(self):
        sequences = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
        expected_encoding = {
"input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
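# A hedged usage sketch (added, not part of the test file): the same
# character-level round trip the tests above exercise, run against the
# released checkpoint. Assumes `transformers` with sentencepiece support is
# installed.
if __name__ == "__main__":
    from transformers import SpeechT5Tokenizer

    tok = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
    ids = tok("this is a test").input_ids
    print(ids)  # character-level ids, ending with the </s> id
    print(tok.decode(ids, skip_special_tokens=True))  # "this is a test"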
| 234
|
'''simple docstring'''
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """
    Newton-Laplace relation: the speed of sound in a fluid is
    c = sqrt(K / rho), with bulk modulus K and density rho.

    >>> round(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9), 1)
    1467.8
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
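    # Illustrative extra check (added sketch; approximate handbook values):
    # for mercury, density ~13600 kg/m^3 and bulk modulus ~2.85e10 Pa give a
    # speed of sound of roughly 1448 m/s.
    print(speed_of_sound_in_a_fluid(density=13600, bulk_modulus=2.85e10))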
| 234
| 1
|
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
_lowercase: Dict = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
_lowercase: Dict = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(10000):
out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(F"""https://google.com{link.get("href")}""")
| 71
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase: Optional[Any] = logging.get_logger(__name__)
_lowercase: Any = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"
    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    model_type = "git"
    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict(self):
        """Serializes this instance to a Python dictionary, expanding the nested vision config."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
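# A short usage sketch (added, not part of the module): construct a config with
# default values and round-trip it through `to_dict`, the pattern the
# `transformers` configuration docstrings use.
if __name__ == "__main__":
    configuration = GitConfig()
    assert configuration.vision_config.hidden_size == 768
    config_dict = configuration.to_dict()
    assert config_dict["model_type"] == "git"
    assert config_dict["vision_config"]["hidden_size"] == 768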
| 71
| 1
|