"""simple docstring"""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1  # stretch interval of the hard-concrete distribution
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--pruning_method",
choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
type=str,
required=True,
help=(
"Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
" sigmoied_threshold = Soft movement pruning)"
),
)
parser.add_argument(
"--threshold",
type=float,
required=False,
help=(
"For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
"For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
"Not needed for `l0`"
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
required=True,
help="Folder containing the model that was previously fine-pruned",
)
parser.add_argument(
"--target_model_path",
default=None,
type=str,
required=False,
help="Folder containing the model that was previously fine-pruned",
)
    args = parser.parse_args()
    main(args)
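# A hypothetical invocation of this script; the checkpoint path below is
# illustrative and not taken from the original repository:
#
#   python bertarize.py \
#       --pruning_method sigmoied_threshold \
#       --threshold 0.1 \
#       --model_name_or_path ./serialization_dir/fine_pruned_squad
#
# With no --target_model_path given, the dense checkpoint is written next to
# the original, in ./serialization_dir/bertarized_fine_pruned_squad.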
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
a_ = logging.get_logger(__name__)
class snake_case ( _UpperCamelCase):
__UpperCamelCase = ['input_features']
def __init__( self : int , a__ : Optional[Any]=80 , a__ : Optional[int]=1_60_00 , a__ : int=1_60 , a__ : Union[str, Any]=30 , a__ : Tuple=4_00 , a__ : List[Any]=0.0 , a__ : Optional[Any]=False , **a__ : List[Any] , ) -> str:
'''simple docstring'''
super().__init__(
feature_size=a__ , sampling_rate=a__ , padding_value=a__ , return_attention_mask=a__ , **a__ , )
_A = n_fft
_A = hop_length
_A = chunk_length
_A = chunk_length * sampling_rate
_A = self.n_samples // hop_length
_A = sampling_rate
_A = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=a__ , min_frequency=0.0 , max_frequency=8_0_0_0.0 , sampling_rate=a__ , norm="slaney" , mel_scale="slaney" , )
def a_ ( self : int , a__ : np.array ) -> np.ndarray:
'''simple docstring'''
_A = spectrogram(
a__ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , )
_A = log_spec[:, :-1]
_A = np.maximum(a__ , log_spec.max() - 8.0 )
_A = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def a_ ( a__ : List[np.ndarray] , a__ : List[np.ndarray] , a__ : float = 0.0 ) -> List[np.ndarray]:
'''simple docstring'''
if attention_mask is not None:
_A = np.array(a__ , np.intaa )
_A = []
for vector, length in zip(a__ , attention_mask.sum(-1 ) ):
_A = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
_A = padding_value
normed_input_values.append(a__ )
else:
_A = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self : Optional[int] , a__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a__ : bool = True , a__ : Optional[int] = None , a__ : Optional[Union[str, TensorType]] = None , a__ : Optional[bool] = None , a__ : Optional[str] = "max_length" , a__ : Optional[int] = None , a__ : Optional[int] = None , a__ : Optional[bool] = None , **a__ : Dict , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_A = isinstance(a__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
_A = is_batched_numpy or (
isinstance(a__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_A = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(a__ , np.ndarray ):
_A = np.asarray(a__ , dtype=np.floataa )
elif isinstance(a__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_A = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_A = [np.asarray([raw_speech] ).T]
_A = BatchFeature({"input_features": raw_speech} )
# convert into correct format for padding
_A = self.pad(
a__ , padding=a__ , max_length=max_length if max_length else self.n_samples , truncation=a__ , pad_to_multiple_of=a__ , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
_A = self.zero_mean_unit_var_norm(
padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
_A = np.stack(padded_inputs["input_features"] , axis=0 )
# make sure list is in array format
_A = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 )
_A = [self._np_extract_fbank_features(a__ ) for waveform in input_features[0]]
if isinstance(input_features[0] , a__ ):
_A = [np.asarray(a__ , dtype=np.floataa ) for feature in input_features]
else:
_A = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
_A = padded_inputs["attention_mask"][:, :: self.hop_length]
if return_tensors is not None:
_A = padded_inputs.convert_to_tensors(a__ )
return padded_inputs
def a_ ( self : Dict ) -> Dict[str, Any]:
'''simple docstring'''
_A = copy.deepcopy(self.__dict__ )
_A = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output | 621 | 0 |
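# A minimal usage sketch for the extractor above; the synthetic sine-wave
# input is illustrative, not part of the original file:
#
#   import numpy as np
#   feature_extractor = WhisperFeatureExtractor()
#   audio = np.sin(np.linspace(0, 2 * np.pi * 440, 16000)).astype(np.float32)  # 1 s at 16 kHz
#   features = feature_extractor(audio, sampling_rate=16000, return_tensors="np")
#   print(features["input_features"].shape)  # (1, 80, 3000): 80 mel bins, padded to 30 s = 3000 frames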
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def a__ ( __lowercase , __lowercase , __lowercase ) -> str:
_A = 0
if start < end:
_A = randint(__UpperCamelCase , __UpperCamelCase )
_A = a[end]
_A = a[pivot]
_A = temp
_A , _A = _in_place_partition(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
count += _in_place_quick_sort(__UpperCamelCase , __UpperCamelCase , p - 1 )
count += _in_place_quick_sort(__UpperCamelCase , p + 1 , __UpperCamelCase )
return count
def a__ ( __lowercase , __lowercase , __lowercase ) -> List[Any]:
_A = 0
_A = randint(__UpperCamelCase , __UpperCamelCase )
_A = a[end]
_A = a[pivot]
_A = temp
_A = start - 1
for index in range(__UpperCamelCase , __UpperCamelCase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
_A = new_pivot_index + 1
_A = a[new_pivot_index]
_A = a[index]
_A = temp
_A = a[new_pivot_index + 1]
_A = a[end]
_A = temp
return new_pivot_index + 1, count
a_ = TemporaryFile()
a_ = 1_00 # 1000 elements are to be sorted
a_ , a_ = 0, 1 # mean and standard deviation
a_ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0) # using the same array
a_ = np.load(outfile)
a_ = len(M) - 1
a_ = _in_place_quick_sort(M, 0, r)
print(
"No of Comparisons for 100 elements selected from a standard normal distribution"
"is :"
)
print(z)
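# A small deterministic sanity check for the counting quick sort above; the
# input list is illustrative:
#
#   data = [5, 3, 8, 1, 9, 2]
#   comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
#   assert data == sorted(data)
#   print(comparisons)  # varies from run to run because pivots are random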
"""simple docstring"""
from __future__ import annotations
def a__ ( __lowercase , __lowercase ) -> float:
_A = sorted(numsa + numsa )
_A , _A = divmod(len(__lowercase ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ = [float(x) for x in input("Enter the elements of first array: ").split()]
a_ = [float(x) for x in input("Enter the elements of second array: ").split()]
print(f'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''') | 621 | 0 |
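# Worked examples (values are illustrative): with an odd combined length the
# middle element is returned; with an even length the two middle elements are
# averaged.
#
#   median_of_two_arrays([1.0, 3.0], [2.0])       # -> 2.0
#   median_of_two_arrays([1.0, 2.0], [3.0, 4.0])  # -> 2.5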
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase = 100 , ) -> Dict:
_A = x_start
_A = fnc(__lowercase )
_A = 0.0
for _ in range(__lowercase ):
# Approximates curve as a sequence of linear lines and sums their length
_A = (x_end - x_start) / steps + xa
_A = fnc(__lowercase )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
_A = xa
_A = fxa
return length
if __name__ == "__main__":
def a__ ( __lowercase ) -> str:
return math.sin(10 * x )
print("f(x) = sin(10 * x)")
print("The length of the curve from x = -10 to x = 10 is:")
a_ = 10
while i <= 10_00_00:
print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10 | 709 |
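# Sanity check against a curve with a known length: f(x) = x from 0 to 1 is a
# straight segment of length sqrt(2), so the approximation should match it to
# floating-point accuracy (the tolerance below is illustrative):
#
#   assert abs(line_length(lambda x: x, 0, 1, 1000) - math.sqrt(2)) < 1e-9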
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
"Salesforce/blip-vqa-capfit-large": (
"https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-base": (
"https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-large": (
"https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
),
"Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
"Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
"Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
"Salesforce/blip-itm-large-flikr": (
"https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
),
}
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'blip_text_model'
def __init__( self : int , a__ : List[str]=3_05_24 , a__ : List[str]=7_68 , a__ : List[Any]=7_68 , a__ : int=30_72 , a__ : List[str]=7_68 , a__ : Dict=12 , a__ : Optional[int]=8 , a__ : Optional[Any]=5_12 , a__ : List[Any]="gelu" , a__ : Optional[Any]=1E-1_2 , a__ : Any=0.0 , a__ : int=0.0 , a__ : Dict=0.0_2 , a__ : Optional[Any]=3_05_22 , a__ : Any=2 , a__ : int=0 , a__ : Union[str, Any]=1_02 , a__ : Tuple=True , a__ : Optional[int]=True , **a__ : Any , ) -> List[Any]:
'''simple docstring'''
super().__init__(
pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , sep_token_id=a__ , **a__ , )
_A = vocab_size
_A = hidden_size
_A = encoder_hidden_size
_A = intermediate_size
_A = projection_dim
_A = hidden_dropout_prob
_A = num_hidden_layers
_A = num_attention_heads
_A = max_position_embeddings
_A = layer_norm_eps
_A = hidden_act
_A = initializer_range
_A = attention_probs_dropout_prob
_A = is_decoder
_A = use_cache
@classmethod
def a_ ( cls : Optional[Any] , a__ : Union[str, os.PathLike] , **a__ : Optional[Any] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(a__ )
_A , _A = cls.get_config_dict(a__ , **a__ )
# get the text config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
_A = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(a__ , **a__ )
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'blip_vision_model'
def __init__( self : Optional[Any] , a__ : Any=7_68 , a__ : List[str]=30_72 , a__ : str=5_12 , a__ : Any=12 , a__ : int=12 , a__ : int=3_84 , a__ : Tuple=16 , a__ : str="gelu" , a__ : Tuple=1E-5 , a__ : List[str]=0.0 , a__ : List[Any]=1E-1_0 , **a__ : int , ) -> List[str]:
'''simple docstring'''
super().__init__(**a__ )
_A = hidden_size
_A = intermediate_size
_A = projection_dim
_A = num_hidden_layers
_A = num_attention_heads
_A = patch_size
_A = image_size
_A = initializer_range
_A = attention_dropout
_A = layer_norm_eps
_A = hidden_act
@classmethod
def a_ ( cls : Any , a__ : Union[str, os.PathLike] , **a__ : int ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(a__ )
_A , _A = cls.get_config_dict(a__ , **a__ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
_A = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(a__ , **a__ )
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'blip'
__UpperCamelCase = True
def __init__( self : List[Any] , a__ : Optional[int]=None , a__ : str=None , a__ : List[str]=5_12 , a__ : Any=2.6_5_9_2 , a__ : str=2_56 , **a__ : Optional[int] , ) -> Dict:
'''simple docstring'''
super().__init__(**a__ )
if text_config is None:
_A = {}
logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values." )
if vision_config is None:
_A = {}
logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values." )
_A = BlipTextConfig(**a__ )
_A = BlipVisionConfig(**a__ )
_A = self.vision_config.hidden_size
_A = projection_dim
_A = logit_scale_init_value
_A = 1.0
_A = 0.0_2
_A = image_text_hidden_size
@classmethod
def a_ ( cls : Tuple , a__ : BlipTextConfig , a__ : BlipVisionConfig , **a__ : Optional[int] ) -> str:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **a__ )
def a_ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
_A = copy.deepcopy(self.__dict__ )
_A = self.text_config.to_dict()
_A = self.vision_config.to_dict()
_A = self.__class__.model_type
return output | 621 | 0 |
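# A usage sketch for composing the configs above; the overridden
# hyperparameters are illustrative:
#
#   text_config = BlipTextConfig(num_hidden_layers=6)
#   vision_config = BlipVisionConfig(image_size=224)
#   config = BlipConfig.from_text_vision_configs(text_config, vision_config)
#   print(config.text_config.num_hidden_layers)    # 6
#   print(config.text_config.encoder_hidden_size)  # 768, copied from the vision config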
"""simple docstring"""
from datetime import datetime as dt
import os
from github import Github
a_ = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def a__ ( ) -> Tuple:
_A = Github(os.environ["GITHUB_TOKEN"] )
_A = g.get_repo("huggingface/transformers" )
_A = repo.get_issues(state="open" )
for issue in open_issues:
_A = sorted([comment for comment in issue.get_comments()] , key=lambda __lowercase : i.created_at , reverse=snake_case_ )
_A = comments[0] if len(snake_case_ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="closed" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class snake_case ( unittest.TestCase , _UpperCamelCase):
def a_ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
_A = load_tool("text-classification" )
self.tool.setup()
_A = load_tool("text-classification" , remote=a__ )
def a_ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
_A = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def a_ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
_A = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def a_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
_A = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def a_ ( self : Dict ) -> Any:
'''simple docstring'''
_A = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(a__ , "positive" ) | 621 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
import random
from typing import Any
class snake_case :
def __init__( self : Dict ) -> str:
'''simple docstring'''
_A = []
_A = 0
_A = 0
def a_ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
return self.head == self.tail
def a_ ( self : List[Any] , a__ : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
self.data.append(UpperCamelCase__ )
_A = self.tail + 1
def a_ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
_A = self.data[self.head]
_A = self.head + 1
return ret
def a_ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
return self.tail - self.head
def a_ ( self : int ) -> Dict:
'''simple docstring'''
print(self.data )
print("**************" )
print(self.data[self.head : self.tail] )
class snake_case :
def __init__( self : int , a__ : Tuple ) -> Optional[Any]:
'''simple docstring'''
_A = data
_A = None
_A = None
_A = 1
def a_ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
return self.data
def a_ ( self : Tuple ) -> Tuple:
'''simple docstring'''
return self.left
def a_ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
return self.right
def a_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
return self.height
def a_ ( self : Optional[Any] , a__ : Tuple ) -> Dict:
'''simple docstring'''
_A = data
def a_ ( self : Dict , a__ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_A = node
def a_ ( self : Dict , a__ : Dict ) -> List[str]:
'''simple docstring'''
_A = node
def a_ ( self : Optional[int] , a__ : str ) -> Tuple:
'''simple docstring'''
_A = height
def a__ ( __lowercase ) -> int:
if node is None:
return 0
return node.get_height()
def a__ ( __lowercase , __lowercase ) -> int:
if a > b:
return a
return b
def a__ ( __lowercase ) -> MyNode:
print("left rotation node:" , node.get_data() )
_A = node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(__UpperCamelCase )
_A = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(__UpperCamelCase )
_A = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(__UpperCamelCase )
return ret
def a__ ( __lowercase ) -> MyNode:
print("right rotation node:" , node.get_data() )
_A = node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(__UpperCamelCase )
_A = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(__UpperCamelCase )
_A = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(__UpperCamelCase )
return ret
def a__ ( __lowercase ) -> MyNode:
_A = node.get_left()
assert left_child is not None
node.set_left(left_rotation(__UpperCamelCase ) )
return right_rotation(__UpperCamelCase )
def a__ ( __lowercase ) -> MyNode:
_A = node.get_right()
assert right_child is not None
node.set_right(right_rotation(__UpperCamelCase ) )
return left_rotation(__UpperCamelCase )
def a__ ( __lowercase , __lowercase ) -> MyNode | None:
if node is None:
return MyNode(__UpperCamelCase )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , __UpperCamelCase ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
_A = node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
_A = right_rotation(__UpperCamelCase )
else:
_A = lr_rotation(__UpperCamelCase )
else:
node.set_right(insert_node(node.get_right() , __UpperCamelCase ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
_A = node.get_right()
assert right_child is not None
if data < right_child.get_data():
_A = rl_rotation(__UpperCamelCase )
else:
_A = left_rotation(__UpperCamelCase )
_A = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(__UpperCamelCase )
return node
def a__ ( __lowercase ) -> Any:
while True:
_A = root.get_right()
if right_child is None:
break
_A = right_child
return root.get_data()
def a__ ( __lowercase ) -> Any:
while True:
_A = root.get_left()
if left_child is None:
break
_A = left_child
return root.get_data()
def a__ ( __lowercase , __lowercase ) -> MyNode | None:
_A = root.get_left()
_A = root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
_A = get_left_most(__UpperCamelCase )
root.set_data(__UpperCamelCase )
root.set_right(del_node(__UpperCamelCase , __UpperCamelCase ) )
elif left_child is not None:
_A = left_child
elif right_child is not None:
_A = right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print("No such data" )
return root
else:
root.set_left(del_node(__UpperCamelCase , __UpperCamelCase ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(__UpperCamelCase , __UpperCamelCase ) )
if get_height(__UpperCamelCase ) - get_height(__UpperCamelCase ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
_A = left_rotation(__UpperCamelCase )
else:
_A = rl_rotation(__UpperCamelCase )
elif get_height(__UpperCamelCase ) - get_height(__UpperCamelCase ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
_A = right_rotation(__UpperCamelCase )
else:
_A = lr_rotation(__UpperCamelCase )
_A = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(__UpperCamelCase )
return root
class snake_case :
def __init__( self : str ) -> Tuple:
'''simple docstring'''
_A = None
def a_ ( self : Tuple ) -> str:
'''simple docstring'''
return get_height(self.root )
def a_ ( self : Optional[Any] , a__ : Union[str, Any] ) -> Any:
'''simple docstring'''
print("insert:" + str(UpperCamelCase__ ) )
_A = insert_node(self.root , UpperCamelCase__ )
def a_ ( self : Tuple , a__ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
print("delete:" + str(UpperCamelCase__ ) )
if self.root is None:
print("Tree is empty!" )
return
_A = del_node(self.root , UpperCamelCase__ )
def __str__( self : Union[str, Any] , ) -> Tuple: # a level traversale, gives a more intuitive look on the tree
'''simple docstring'''
_A = ''''''
_A = MyQueue()
q.push(self.root )
_A = self.get_height()
if layer == 0:
return output
_A = 0
while not q.is_empty():
_A = q.pop()
_A = ''' ''' * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(UpperCamelCase__ )
q.push(UpperCamelCase__ )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
_A = cnt + 1
for i in range(1_00 ):
if cnt == math.pow(2 , UpperCamelCase__ ) - 1:
_A = layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def a__ ( ) -> None:
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
a_ = AVLtree()
a_ = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t)) | 711 |
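# A quick balance check (illustrative, not part of the original demo):
# inserting 1..7 in ascending order into a plain binary search tree would give
# height 7, but the AVL rotations keep the tree at the minimal height 3 (a
# single leaf has height 1 in this implementation):
#
#   t = AVLtree()
#   for value in range(1, 8):
#       t.insert(value)
#   assert t.get_height() == 3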
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase):
__UpperCamelCase = StableDiffusionInpaintPipeline
__UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
__UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__UpperCamelCase = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__UpperCamelCase = frozenset([])
def a_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
_A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a__ , )
_A = PNDMScheduler(skip_prk_steps=a__ )
torch.manual_seed(0 )
_A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
_A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , )
_A = CLIPTextModel(a__ )
_A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_A = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def a_ ( self : Optional[Any] , a__ : List[str] , a__ : Tuple=0 ) -> int:
'''simple docstring'''
_A = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
_A = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A = Image.fromarray(np.uinta(a__ ) ).convert("RGB" ).resize((64, 64) )
_A = Image.fromarray(np.uinta(image + 4 ) ).convert("RGB" ).resize((64, 64) )
if str(a__ ).startswith("mps" ):
_A = torch.manual_seed(a__ )
else:
_A = torch.Generator(device=a__ ).manual_seed(a__ )
_A = {
"prompt": "A painting of a squirrel eating a burger",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def a_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_A = "cpu" # ensure determinism for the device-dependent torch.Generator
_A = self.get_dummy_components()
_A = StableDiffusionInpaintPipeline(**a__ )
_A = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_A = self.get_dummy_inputs(a__ )
_A = sd_pipe(**a__ ).images
_A = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A = np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def a_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase):
def a_ ( self : List[Any] ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
_A = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench.npy" )
_A = "stabilityai/stable-diffusion-2-inpainting"
_A = StableDiffusionInpaintPipeline.from_pretrained(a__ , safety_checker=a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
_A = "Face of a yellow cat, high resolution, sitting on a park bench"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type="np" , )
_A = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def a_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
_A = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench_fp16.npy" )
_A = "stabilityai/stable-diffusion-2-inpainting"
_A = StableDiffusionInpaintPipeline.from_pretrained(
a__ , torch_dtype=torch.floataa , safety_checker=a__ , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
_A = "Face of a yellow cat, high resolution, sitting on a park bench"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type="np" , )
_A = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def a_ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
_A = "stabilityai/stable-diffusion-2-inpainting"
_A = PNDMScheduler.from_pretrained(a__ , subfolder="scheduler" )
_A = StableDiffusionInpaintPipeline.from_pretrained(
a__ , safety_checker=a__ , scheduler=a__ , torch_dtype=torch.floataa , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_A = "Face of a yellow cat, high resolution, sitting on a park bench"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , num_inference_steps=2 , output_type="np" , )
_A = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.6_5 * 10**9 | 621 | 0 |
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
a_ = get_tests_dir("fixtures")
class snake_case ( unittest.TestCase):
def a_ ( self : List[str] ) -> str:
'''simple docstring'''
_A = mock.Mock()
_A = 5_00
_A = {}
_A = HTTPError
_A = {}
# Download this model to make sure it's in the cache.
_A = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=_lowerCamelCase ) as mock_head:
_A = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# This check we did call the fake head request
mock_head.assert_called()
def a_ ( self : List[str] ) -> int:
'''simple docstring'''
_A = WavaVecaFeatureExtractor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class snake_case ( unittest.TestCase):
@classmethod
def a_ ( cls : List[str] ) -> Optional[Any]:
'''simple docstring'''
_A = TOKEN
HfFolder.save_token(_lowerCamelCase )
@classmethod
def a_ ( cls : List[Any] ) -> Optional[int]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-feature-extractor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" )
except HTTPError:
pass
def a_ ( self : Any ) -> int:
'''simple docstring'''
_A = WavaVecaFeatureExtractor.from_pretrained(_lowerCamelCase )
feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token )
_A = WavaVecaFeatureExtractor.from_pretrained(F"""{USER}/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_lowerCamelCase , repo_id="test-feature-extractor" , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
_A = WavaVecaFeatureExtractor.from_pretrained(F"""{USER}/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
def a_ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
_A = WavaVecaFeatureExtractor.from_pretrained(_lowerCamelCase )
feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token )
_A = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_lowerCamelCase , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
_A = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
def a_ ( self : str ) -> Any:
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
_A = CustomFeatureExtractor.from_pretrained(_lowerCamelCase )
feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , )
_A = AutoFeatureExtractor.from_pretrained(
F"""{USER}/test-dynamic-feature-extractor""" , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" ) | 712 |
"""simple docstring"""
def a__ ( __lowercase , __lowercase ) -> int:
while a != 0:
_A , _A = b % a, a
return b
def a__ ( __lowercase , __lowercase ) -> int:
if gcd(__lowercase , __lowercase ) != 1:
_A = f"""mod inverse of {a!r} and {m!r} does not exist"""
raise ValueError(__lowercase )
_A , _A , _A = 1, 0, a
_A , _A , _A = 0, 1, m
while va != 0:
_A = ua // va
_A , _A , _A , _A , _A , _A = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m | 621 | 0 |
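# Worked example (values are illustrative): the inverse of 7 modulo 26 is 15,
# since 7 * 15 = 105 = 4 * 26 + 1.
#
#   assert find_mod_inverse(7, 26) == 15
#   assert (7 * find_mod_inverse(7, 26)) % 26 == 1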
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ : Union[str, Any] = logging.get_logger(__name__)
a_ : Any = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class snake_case ( _A):
__UpperCamelCase = 'distilbert'
__UpperCamelCase = {
'hidden_size': 'dim',
'num_attention_heads': 'n_heads',
'num_hidden_layers': 'n_layers',
}
def __init__( self : Optional[int] , a__ : List[Any]=3_05_22 , a__ : Optional[Any]=5_12 , a__ : Dict=False , a__ : Dict=6 , a__ : int=12 , a__ : Optional[int]=7_68 , a__ : Union[str, Any]=4 * 7_68 , a__ : Union[str, Any]=0.1 , a__ : Optional[int]=0.1 , a__ : Union[str, Any]="gelu" , a__ : Any=0.0_2 , a__ : Dict=0.1 , a__ : Union[str, Any]=0.2 , a__ : Optional[int]=0 , **a__ : Union[str, Any] , ) -> List[Any]:
'''simple docstring'''
_A = vocab_size
_A = max_position_embeddings
_A = sinusoidal_pos_embds
_A = n_layers
_A = n_heads
_A = dim
_A = hidden_dim
_A = dropout
_A = attention_dropout
_A = activation
_A = initializer_range
_A = qa_dropout
_A = seq_classif_dropout
super().__init__(**a__ , pad_token_id=a__ )
class snake_case ( _A):
@property
def a_ ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
_A = {0: "batch", 1: "choice", 2: "sequence"}
else:
_A = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] ) | 713 |
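# A usage sketch for the classes above; the printed axes follow from the
# `inputs` property for the default task:
#
#   config = DistilBertConfig()
#   onnx_config = DistilBertOnnxConfig(config)
#   print(onnx_config.inputs)
#   # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#   #              ('attention_mask', {0: 'batch', 1: 'sequence'})])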
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock

import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset

from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType


class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))
    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)
    return dl


def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)
    batch_sizes = [len(batch[0]) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes


def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()
    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1, 1],
    )
    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 2],
    )


def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1],
    )
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 1],
    )


def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]


def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass
        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)


def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches
    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches


def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError
    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches


def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass
        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)


def main():
    accelerator = create_accelerator()
    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()
    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()
    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()
    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()
    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()
    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()
    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state


if __name__ == "__main__":
    main()
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 256047
RO_CODE = 256145


@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
def a_ ( self : Optional[int] ) -> str:
'''simple docstring'''
_A = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_A = self.rust_tokenizer_class.from_pretrained(__A , **__A )
_A = self.tokenizer_class.from_pretrained(__A , **__A )
_A = tempfile.mkdtemp()
_A = tokenizer_r.save_pretrained(__A )
_A = tokenizer_p.save_pretrained(__A )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
_A = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(__A , __A )
# Checks everything loads correctly in the same way
_A = tokenizer_r.from_pretrained(__A )
_A = tokenizer_p.from_pretrained(__A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__A , __A ) )
shutil.rmtree(__A )
# Save tokenizer rust, legacy_format=True
_A = tempfile.mkdtemp()
_A = tokenizer_r.save_pretrained(__A , legacy_format=__A )
_A = tokenizer_p.save_pretrained(__A )
# Checks it save with the same files
self.assertSequenceEqual(__A , __A )
# Checks everything loads correctly in the same way
_A = tokenizer_r.from_pretrained(__A )
_A = tokenizer_p.from_pretrained(__A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__A , __A ) )
shutil.rmtree(__A )
# Save tokenizer rust, legacy_format=False
_A = tempfile.mkdtemp()
_A = tokenizer_r.save_pretrained(__A , legacy_format=__A )
_A = tokenizer_p.save_pretrained(__A )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_A = tokenizer_r.from_pretrained(__A )
_A = tokenizer_p.from_pretrained(__A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__A , __A ) )
shutil.rmtree(__A )
@require_torch
def a_ ( self : Any ) -> str:
'''simple docstring'''
if not self.test_seqaseq:
return
_A = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Longer text that will definitely require truncation.
_A = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for"
" Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons"
" will only worsen the violence and misery for millions of people.",
]
_A = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
" Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"
" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
try:
_A = tokenizer.prepare_seqaseq_batch(
src_texts=__A , tgt_texts=__A , max_length=3 , max_target_length=10 , return_tensors="pt" , src_lang="eng_Latn" , tgt_lang="ron_Latn" , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
_A = tokenizer.prepare_seqaseq_batch(
__A , tgt_texts=__A , max_length=3 , return_tensors="pt" )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
_A = tokenizer.prepare_seqaseq_batch(
src_texts=__A , max_length=3 , max_target_length=10 , return_tensors="pt" )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn("decoder_input_ids" , __A )
@unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece." )
def a_ ( self : str ) -> Any:
'''simple docstring'''
pass
def a_ ( self : str ) -> Optional[Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_A = [AddedToken("<special>" , lstrip=__A )]
_A = self.rust_tokenizer_class.from_pretrained(
__A , additional_special_tokens=__A , **__A )
_A = tokenizer_r.encode("Hey this is a <special> token" )
_A = tokenizer_r.encode("<special>" , add_special_tokens=__A )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
_A = self.rust_tokenizer_class.from_pretrained(
__A , additional_special_tokens=__A , **__A , )
_A = self.tokenizer_class.from_pretrained(
__A , additional_special_tokens=__A , **__A )
_A = tokenizer_p.encode("Hey this is a <special> token" )
_A = tokenizer_cr.encode("Hey this is a <special> token" )
self.assertEqual(__A , __A )
self.assertEqual(__A , __A )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [
        256047,
        16297,
        134408,
        8165,
        248066,
        14734,
        950,
        1135,
        105721,
        3573,
        83,
        27352,
        108,
        49486,
        2,
    ]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
        return cls
def a_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"] , 25_60_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"] , 25_60_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"] , 25_60_57 )
def a_ ( self : List[Any] ) -> Any:
'''simple docstring'''
_A = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __A )
def a_ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
self.assertIn(__A , self.tokenizer.all_special_ids )
# fmt: off
_A = [RO_CODE, 42_54, 9_80_68, 11_29_23, 3_90_72, 39_09, 7_13, 10_27_67, 26, 1_73_14, 3_56_42, 1_46_83, 3_31_18, 20_22, 6_69_87, 2, 25_60_47]
# fmt: on
_A = self.tokenizer.decode(__A , skip_special_tokens=__A )
_A = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__A )
self.assertEqual(__A , __A )
self.assertNotIn(self.tokenizer.eos_token , __A )
def a_ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_A = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0] , __A )
_A = 10
_A = self.tokenizer(__A , max_length=__A , truncation=__A ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , __A )
self.assertEqual(len(__A ) , __A )
def a_ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [25_62_03, 3] )
def a_ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
_A = tempfile.mkdtemp()
_A = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__A )
_A = NllbTokenizer.from_pretrained(__A )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __A )
@require_torch
def a_ ( self : int ) -> List[str]:
'''simple docstring'''
_A = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__A , truncation=__A , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
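# shift_tokens_right (mBART-style helper): shift the labels one step to the right and insert the target language code as the decoder start token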
_A = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["ron_Latn"] )
self.assertIsInstance(__A , __A )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
_A = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __A )
self.assertEqual(__A , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def a_ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
_A = self.tokenizer(self.src_text , padding=__A , truncation=__A , max_length=3 , return_tensors="pt" )
_A = self.tokenizer(
text_target=self.tgt_text , padding=__A , truncation=__A , max_length=10 , return_tensors="pt" )
_A = targets["input_ids"]
_A = shift_tokens_right(
__A , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def a_ ( self : str ) -> List[str]:
'''simple docstring'''
_A = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
self.assertEqual(
nested_simplify(__A ) , {
# eng_Latn, A, test, EOS
"input_ids": [[25_60_47, 70, 73_56, 2]],
"attention_mask": [[1, 1, 1, 1]],
# fra_Latn
"forced_bos_token_id": 25_60_57,
} , )
@require_torch
def a_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
_A = True
_A = self.tokenizer(
"UN Chief says there is no military solution in Syria" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
self.assertEqual(
inputs.input_ids , [1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2, 25_60_47] )
_A = False
_A = self.tokenizer(
"UN Chief says there is no military solution in Syria" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
self.assertEqual(
inputs.input_ids , [25_60_47, 1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2] ) | 714 |
"""simple docstring"""
class snake_case :
def __init__( self : Optional[int] , a__ : List[Any] , a__ : List[str] , a__ : Tuple ) -> Optional[Any]:
'''simple docstring'''
_A = None
_A = None
_A = graph
self._normalize_graph(a__ , a__ )
_A = len(a__ )
_A = None
def a_ ( self : str , a__ : List[str] , a__ : List[Any] ) -> Dict:
'''simple docstring'''
if isinstance(sources , int ):
_A = [sources]
if isinstance(sinks , int ):
_A = [sinks]
if len(a__ ) == 0 or len(a__ ) == 0:
return
_A = sources[0]
_A = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(a__ ) > 1 or len(a__ ) > 1:
_A = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
_A = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_A = max_input_flow
_A = 0
_A = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_A = max_input_flow
_A = size - 1
def a_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
if self.maximum_flow_algorithm is None:
raise Exception("You need to set maximum flow algorithm before." )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def a_ ( self : List[Any] , a__ : Optional[Any] ) -> str:
'''simple docstring'''
_A = algorithm(self )
class snake_case :
def __init__( self : List[str] , a__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
_A = flow_network
_A = flow_network.verticesCount
_A = flow_network.sourceIndex
_A = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms; make a deep copy before doing that
_A = flow_network.graph
_A = False
def a_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
if not self.executed:
self._algorithm()
_A = True
def a_ ( self : Any ) -> int:
'''simple docstring'''
pass
class snake_case ( _UpperCamelCase):
def __init__( self : Optional[Any] , a__ : Dict ) -> List[str]:
'''simple docstring'''
super().__init__(a__ )
# use this to save your result
_A = -1
def a_ ( self : Any ) -> List[str]:
'''simple docstring'''
if not self.executed:
raise Exception("You should execute algorithm before using its result!" )
return self.maximum_flow
class snake_case ( _UpperCamelCase):
def __init__( self : Union[str, Any] , a__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
super().__init__(a__ )
_A = [[0] * self.verticies_count for i in range(self.verticies_count )]
_A = [0] * self.verticies_count
_A = [0] * self.verticies_count
def a_ ( self : Any ) -> Dict:
'''simple docstring'''
_A = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_A = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
_A = 0
while i < len(a__ ):
_A = vertices_list[i]
_A = self.heights[vertex_index]
self.process_vertex(a__ )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(a__ ) )
_A = 0
else:
i += 1
_A = sum(self.preflow[self.source_index] )
def a_ ( self : Dict , a__ : Any ) -> Optional[int]:
'''simple docstring'''
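# discharge this vertex: keep pushing excess flow to admissible neighbours, relabeling whenever no push is possible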
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(a__ , a__ )
self.relabel(a__ )
def a_ ( self : str , a__ : Optional[int] , a__ : List[Any] ) -> Optional[int]:
'''simple docstring'''
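# push no more than the remaining excess and no more than the residual capacity of the edge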
_A = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def a_ ( self : Any , a__ : Dict ) -> Any:
'''simple docstring'''
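# relabel: lift this vertex just above its lowest neighbour that still has residual capacity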
_A = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_A = self.heights[to_index]
if min_height is not None:
_A = min_height + 1
if __name__ == "__main__":
a_ = [0]
a_ = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
a_ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
a_ = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
a_ = flow_network.find_maximum_flow()
print(f'''maximum flow is {maximum_flow}''') | 621 | 0 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase ) -> Tuple:
_A = original_name.split("." )[0]
_A = key.split("." )
_A = int(key_list[key_list.index(UpperCamelCase__ ) - 2] )
_A = int(key_list[key_list.index(UpperCamelCase__ ) - 1] )
_A = orig_block_num - offset
_A = key.replace(f"""{orig_block_num}.{layer_num}.{original_name}""" , f"""block.{new_block_num}.{layer_num}.{new_name}""" )
return key
def a__ ( __lowercase ) -> List[Any]:
_A = OrderedDict()
_A , _A = 0, 0
for key, value in state_dict.items():
if key.startswith("network" ):
_A = key.replace("network" , "poolformer.encoder" )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
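# each projection bias outside the patch embedding closes one embedding stage, so later block indices are shifted down by this offset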
if key.endswith("bias" ) and "patch_embed" not in key:
patch_emb_offset += 1
_A = key[: key.find("proj" )]
_A = key.replace(UpperCamelCase__ , f"""patch_embeddings.{total_embed_found}.""" )
_A = key.replace("proj" , "projection" )
if key.endswith("bias" ):
total_embed_found += 1
if "patch_embeddings" in key:
_A = "poolformer.encoder." + key
if "mlp.fc1" in key:
_A = replace_key_with_offset(UpperCamelCase__ , UpperCamelCase__ , "mlp.fc1" , "output.conv1" )
if "mlp.fc2" in key:
_A = replace_key_with_offset(UpperCamelCase__ , UpperCamelCase__ , "mlp.fc2" , "output.conv2" )
if "norm1" in key:
_A = replace_key_with_offset(UpperCamelCase__ , UpperCamelCase__ , "norm1" , "before_norm" )
if "norm2" in key:
_A = replace_key_with_offset(UpperCamelCase__ , UpperCamelCase__ , "norm2" , "after_norm" )
if "layer_scale_1" in key:
_A = replace_key_with_offset(UpperCamelCase__ , UpperCamelCase__ , "layer_scale_1" , "layer_scale_1" )
if "layer_scale_2" in key:
_A = replace_key_with_offset(UpperCamelCase__ , UpperCamelCase__ , "layer_scale_2" , "layer_scale_2" )
if "head" in key:
_A = key.replace("head" , "classifier" )
_A = value
return new_state_dict
def a__ ( ) -> Optional[int]:
_A = "http://images.cocodataset.org/val2017/000000039769.jpg"
_A = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
return image
@torch.no_grad()
def a__ ( __lowercase , __lowercase , __lowercase ) -> Dict:
_A = PoolFormerConfig()
# set attributes based on model_name
_A = "huggingface/label-files"
_A = model_name[-3:]
_A = 1000
_A = "imagenet-1k-id2label.json"
_A = (1, 1000)
# set config attributes
_A = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="dataset" ) , "r" ) )
_A = {int(k ): v for k, v in idalabel.items()}
_A = idalabel
_A = {v: k for k, v in idalabel.items()}
if size == "s12":
_A = [2, 2, 6, 2]
_A = [64, 128, 320, 512]
_A = 4.0
_A = 0.9
elif size == "s24":
_A = [4, 4, 12, 4]
_A = [64, 128, 320, 512]
_A = 4.0
_A = 0.9
elif size == "s36":
_A = [6, 6, 18, 6]
_A = [64, 128, 320, 512]
_A = 4.0
_A = 1E-6
_A = 0.9
elif size == "m36":
_A = [6, 6, 18, 6]
_A = [96, 192, 384, 768]
_A = 4.0
_A = 1E-6
_A = 0.95
elif size == "m48":
_A = [8, 8, 24, 8]
_A = [96, 192, 384, 768]
_A = 4.0
_A = 1E-6
_A = 0.95
else:
raise ValueError(f"""Size {size} not supported""" )
# load image processor
_A = PoolFormerImageProcessor(crop_pct=UpperCamelCase__ )
# Prepare image
_A = prepare_img()
_A = image_processor(images=UpperCamelCase__ , return_tensors="pt" ).pixel_values
logger.info(f"""Converting model {model_name}...""" )
# load original state dict
_A = torch.load(UpperCamelCase__ , map_location=torch.device("cpu" ) )
# rename keys
_A = rename_keys(UpperCamelCase__ )
# create HuggingFace model and load state dict
_A = PoolFormerForImageClassification(UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
model.eval()
# Define image processor
_A = PoolFormerImageProcessor(crop_pct=UpperCamelCase__ )
_A = image_processor(images=prepare_img() , return_tensors="pt" ).pixel_values
# forward pass
_A = model(UpperCamelCase__ )
_A = outputs.logits
# define expected logit slices for different models
if size == "s12":
_A = torch.tensor([-0.3_045, -0.6_758, -0.4_869] )
elif size == "s24":
_A = torch.tensor([0.4_402, -0.1_374, -0.8_045] )
elif size == "s36":
_A = torch.tensor([-0.6_080, -0.5_133, -0.5_898] )
elif size == "m36":
_A = torch.tensor([0.3_952, 0.2_263, -1.2_668] )
elif size == "m48":
_A = torch.tensor([0.1_167, -0.0_656, -0.3_423] )
else:
raise ValueError(f"""Size {size} not supported""" )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1E-2 )
# finally, save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="poolformer_s12",
type=str,
help="Name of the model you\'d like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
a_ = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path) | 715 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
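# lazy import structure: map each submodule to its public symbols; backend-specific entries are appended below only if the backend is available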
a_ = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 621 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case ( __UpperCAmelCase , unittest.TestCase):
__UpperCamelCase = BlenderbotSmallTokenizer
__UpperCamelCase = False
def a_ ( self : Tuple ) -> int:
'''simple docstring'''
super().setUp()
_A = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
_A = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
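# BPE merges: a version header followed by one merge rule per line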
_A = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
_A = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__SCREAMING_SNAKE_CASE ) )
def a_ ( self : Union[str, Any] , **a__ : Any ) -> List[str]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def a_ ( self : Dict , a__ : Union[str, Any] ) -> str:
'''simple docstring'''
_A = "adapt act apte"
_A = "adapt act apte"
return input_text, output_text
def a_ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
_A = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_A = "adapt act apte"
_A = ["adapt", "act", "ap@@", "te"]
_A = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
_A = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
_A = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def a_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
_A = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
assert tok("sam" ).input_ids == [13_84]
_A = "I am a small frog."
_A = tok([src_text] , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE )["input_ids"]
_A = tok.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def a_ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
_A = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
_A = "I am a small frog ."
_A = "."
_A = tok(__SCREAMING_SNAKE_CASE )["input_ids"]
_A = tok(__SCREAMING_SNAKE_CASE )["input_ids"]
assert encoded[-1] == encoded_dot[0] | 716 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
a_ = logging.get_logger(__name__)
class snake_case ( _UpperCamelCase):
def __init__( self : str , *a__ : Dict , **a__ : Optional[int] ) -> None:
'''simple docstring'''
warnings.warn(
"The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use GLPNImageProcessor instead." , a__ , )
super().__init__(*a__ , **a__ ) | 621 | 0 |
"""simple docstring"""
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts without needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
a_ = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
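# the pytest hooks below simply forward to the shared helpers in diffusers' testing utils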
def a__ ( __lowercase ) -> str:
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(__lowercase )
def a__ ( __lowercase ) -> Optional[int]:
from diffusers.utils.testing_utils import pytest_terminal_summary_main
_A = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(__lowercase , id=__lowercase ) | 717 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def a__ ( __lowercase ) -> Optional[int]:
_A = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"_float_tensor",
"decoder.output_projection.weight",
]
for k in ignore_keys:
state_dict.pop(__lowercase , __lowercase )
def a__ ( __lowercase ) -> List[Any]:
_A , _A = emb.weight.shape
_A = nn.Linear(__lowercase , __lowercase , bias=__lowercase )
_A = emb.weight.data
return lin_layer
def a__ ( __lowercase , __lowercase="facebook/mbart-large-en-ro" , __lowercase=False , __lowercase=False ) -> List[str]:
_A = torch.load(__lowercase , map_location="cpu" )["model"]
remove_ignore_keys_(__lowercase )
_A = state_dict["encoder.embed_tokens.weight"].shape[0]
_A = MBartConfig.from_pretrained(__lowercase , vocab_size=__lowercase )
if mbart_aa and finetuned:
_A = "relu"
_A = state_dict["decoder.embed_tokens.weight"]
_A = MBartForConditionalGeneration(__lowercase )
model.model.load_state_dict(__lowercase )
if finetuned:
_A = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config",
default="facebook/mbart-large-cc25",
type=str,
help="Which huggingface architecture to use: mbart-large",
)
parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
a_ = parser.parse_args()
a_ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_50
)
model.save_pretrained(args.pytorch_dump_folder_path) | 621 | 0 |
"""simple docstring"""
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
a_ = get_logger(__name__)
class snake_case :
def __init__( self : Dict , a__ : Optional[str] = None ) -> List[Any]:
'''simple docstring'''
_A = (
os.path.join(__A , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
_A = Extractor
def a_ ( self : Union[str, Any] , a__ : str ) -> str:
'''simple docstring'''
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path
_A = os.path.abspath(__A )
return os.path.join(self.extract_dir , hash_url_to_filename(__A ) )
def a_ ( self : str , a__ : str , a__ : bool ) -> bool:
'''simple docstring'''
return force_extract or (
not os.path.isfile(__A ) and not (os.path.isdir(__A ) and os.listdir(__A ))
)
def a_ ( self : List[str] , a__ : str , a__ : bool = False ) -> str:
'''simple docstring'''
_A = self.extractor.infer_extractor_format(__A )
if not extractor_format:
return input_path
_A = self._get_output_path(__A )
if self._do_extract(__A , __A ):
self.extractor.extract(__A , __A , __A )
return output_path
class snake_case ( _UpperCamelCase):
@classmethod
@abstractmethod
def a_ ( cls : str , a__ : Union[Path, str] , **a__ : Any ) -> bool:
'''simple docstring'''
...
@staticmethod
@abstractmethod
def a_ ( a__ : Union[Path, str] , a__ : Union[Path, str] ) -> None:
'''simple docstring'''
...
class snake_case ( _UpperCamelCase , _UpperCamelCase):
__UpperCamelCase = []
@staticmethod
def a_ ( a__ : Union[Path, str] , a__ : int ) -> List[Any]:
'''simple docstring'''
with open(__A , "rb" ) as f:
return f.read(__A )
@classmethod
def a_ ( cls : Any , a__ : Union[Path, str] , a__ : bytes = b"" ) -> bool:
'''simple docstring'''
if not magic_number:
_A = max(len(__A ) for cls_magic_number in cls.magic_numbers )
try:
_A = cls.read_magic_number(__A , __A )
except OSError:
return False
return any(magic_number.startswith(__A ) for cls_magic_number in cls.magic_numbers )
class snake_case ( _UpperCamelCase):
@classmethod
def a_ ( cls : Union[str, Any] , a__ : Union[Path, str] , **a__ : str ) -> bool:
'''simple docstring'''
return tarfile.is_tarfile(__A )
@staticmethod
def a_ ( a__ : Optional[Any] , a__ : Any ) -> str:
'''simple docstring'''
def resolved(a__ : str ) -> str:
return os.path.realpath(os.path.abspath(__A ) )
def badpath(a__ : str , a__ : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(__A , __A ) ).startswith(__A )
def badlink(a__ : Optional[int] , a__ : str ) -> bool:
# Links are interpreted relative to the directory containing the link
_A = resolved(os.path.join(__A , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=__A )
_A = resolved(__A )
for finfo in members:
if badpath(finfo.name , __A ):
logger.error(F"""Extraction of {finfo.name} is blocked (illegal path)""" )
elif finfo.issym() and badlink(__A , __A ):
logger.error(F"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""" )
elif finfo.islnk() and badlink(__A , __A ):
logger.error(F"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""" )
else:
yield finfo
@staticmethod
def a_ ( a__ : Union[Path, str] , a__ : Union[Path, str] ) -> None:
'''simple docstring'''
os.makedirs(__A , exist_ok=__A )
_A = tarfile.open(__A )
tar_file.extractall(__A , members=TarExtractor.safemembers(__A , __A ) )
tar_file.close()
class snake_case ( _UpperCamelCase):
__UpperCamelCase = [b'\x1F\x8B']
@staticmethod
def a_ ( a__ : Union[Path, str] , a__ : Union[Path, str] ) -> None:
'''simple docstring'''
with gzip.open(__A , "rb" ) as gzip_file:
with open(__A , "wb" ) as extracted_file:
shutil.copyfileobj(__A , __A )
class snake_case ( _UpperCamelCase):
__UpperCamelCase = [
b'PK\x03\x04',
b'PK\x05\x06', # empty archive
b'PK\x07\x08', # spanned archive
]
@classmethod
def a_ ( cls : List[str] , a__ : Union[Path, str] , a__ : bytes = b"" ) -> bool:
'''simple docstring'''
if super().is_extractable(__A , magic_number=__A ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(__A , "rb" ) as fp:
_A = _EndRecData(__A )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
_A = fp.read(__A ) # CD is where we expect it to be
if len(__A ) == sizeCentralDir:
_A = struct.unpack(__A , __A ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def a_ ( a__ : Union[Path, str] , a__ : Union[Path, str] ) -> None:
'''simple docstring'''
os.makedirs(__A , exist_ok=__A )
with zipfile.ZipFile(__A , "r" ) as zip_file:
zip_file.extractall(__A )
zip_file.close()
class snake_case ( _UpperCamelCase):
__UpperCamelCase = [b'\xFD\x37\x7A\x58\x5A\x00']
@staticmethod
def a_ ( a__ : Union[Path, str] , a__ : Union[Path, str] ) -> None:
'''simple docstring'''
with lzma.open(__A ) as compressed_file:
with open(__A , "wb" ) as extracted_file:
shutil.copyfileobj(__A , __A )
class snake_case ( _UpperCamelCase):
__UpperCamelCase = [b'Rar!\x1a\x07\x00', b'Rar!\x1a\x07\x01\x00'] # RAR_ID # RAR5_ID
@staticmethod
def a_ ( a__ : Union[Path, str] , a__ : Union[Path, str] ) -> None:
'''simple docstring'''
if not config.RARFILE_AVAILABLE:
raise ImportError("Please pip install rarfile" )
import rarfile
os.makedirs(__A , exist_ok=__A )
_A = rarfile.RarFile(__A )
rf.extractall(__A )
rf.close()
class snake_case ( _UpperCamelCase):
__UpperCamelCase = [b'\x28\xb5\x2F\xFD']
@staticmethod
def a_ ( a__ : Union[Path, str] , a__ : Union[Path, str] ) -> None:
'''simple docstring'''
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("Please pip install zstandard" )
import zstandard as zstd
_A = zstd.ZstdDecompressor()
with open(__A , "rb" ) as ifh, open(__A , "wb" ) as ofh:
dctx.copy_stream(__A , __A )
class snake_case ( _UpperCamelCase):
__UpperCamelCase = [b'\x42\x5A\x68']
@staticmethod
def a_ ( a__ : Union[Path, str] , a__ : Union[Path, str] ) -> None:
'''simple docstring'''
with bz2.open(__A , "rb" ) as compressed_file:
with open(__A , "wb" ) as extracted_file:
shutil.copyfileobj(__A , __A )
class snake_case ( _UpperCamelCase):
__UpperCamelCase = [b'\x37\x7A\xBC\xAF\x27\x1C']
@staticmethod
def a_ ( a__ : Union[Path, str] , a__ : Union[Path, str] ) -> None:
'''simple docstring'''
if not config.PY7ZR_AVAILABLE:
raise ImportError("Please pip install py7zr" )
import py7zr
os.makedirs(__A , exist_ok=__A )
with py7zr.SevenZipFile(__A , "r" ) as archive:
archive.extractall(__A )
class snake_case ( _UpperCamelCase):
__UpperCamelCase = [b'\x04\x22\x4D\x18']
@staticmethod
def a_ ( a__ : Union[Path, str] , a__ : Union[Path, str] ) -> None:
'''simple docstring'''
if not config.LZ4_AVAILABLE:
raise ImportError("Please pip install lz4" )
import lz4.frame
with lz4.frame.open(__A , "rb" ) as compressed_file:
with open(__A , "wb" ) as extracted_file:
shutil.copyfileobj(__A , __A )
class snake_case :
__UpperCamelCase = {
'tar': TarExtractor,
'gzip': GzipExtractor,
'zip': ZipExtractor,
'xz': XzExtractor,
'rar': RarExtractor,
'zstd': ZstdExtractor,
'bz2': BzipaExtractor,
'7z': SevenZipExtractor, # <Added version="2.4.0"/>
'lz4': LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def a_ ( cls : Any ) -> List[Any]:
'''simple docstring'''
return max(
len(__A )
for extractor in cls.extractors.values()
if issubclass(__A , __A )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def a_ ( a__ : Union[Path, str] , a__ : int ) -> Tuple:
'''simple docstring'''
try:
return MagicNumberBaseExtractor.read_magic_number(__A , magic_number_length=__A )
except OSError:
return b""
@classmethod
def a_ ( cls : int , a__ : Union[Path, str] , a__ : bool = False ) -> bool:
'''simple docstring'''
warnings.warn(
"Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use \'infer_extractor_format\' instead." , category=__A , )
_A = cls.infer_extractor_format(__A )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def a_ ( cls : Any , a__ : Union[Path, str] ) -> str: # <Added version="2.4.0"/>
'''simple docstring'''
_A = cls._get_magic_number_max_length()
_A = cls._read_magic_number(__A , __A )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(__A , magic_number=__A ):
return extractor_format
@classmethod
def a_ ( cls : Any , a__ : Union[Path, str] , a__ : Union[Path, str] , a__ : Optional[str] = None , a__ : Optional[BaseExtractor] = "deprecated" , ) -> None:
'''simple docstring'''
os.makedirs(os.path.dirname(__A ) , exist_ok=__A )
# Prevent parallel extractions
_A = str(Path(__A ).with_suffix(".lock" ) )
with FileLock(__A ):
shutil.rmtree(__A , ignore_errors=__A )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(__A , __A ): # passed as positional arg
warnings.warn(
"Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use \'extractor_format\' instead." , category=__A , )
_A = extractor if extractor != "deprecated" else extractor_format
else:
_A = cls.extractors[extractor_format]
return extractor.extract(__A , __A )
else:
warnings.warn(
"Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an "
"exception in 3.0.0." , category=__A , )
for extractor in cls.extractors.values():
if extractor.is_extractable(__A ):
return extractor.extract(__A , __A ) | 718 |
"""simple docstring"""
import numpy as np
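# Exponential Linear Unit: identity for positive inputs, alpha * (exp(x) - 1) otherwise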
def a__ ( __lowercase , __lowercase ) -> np.ndarray:
return np.where(vector > 0 , __lowercase , (alpha * (np.exp(__lowercase ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 621 | 0 |
"""simple docstring"""
from itertools import product
def a__ ( __lowercase , __lowercase ) -> Optional[int]:
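# build the frequency distribution of totals for a given number of dice with the given number of sides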
_A = sides_number
_A = max_face_number * dice_number
_A = [0] * (max_total + 1)
_A = 1
_A = range(_lowerCamelCase , max_face_number + 1 )
for dice_numbers in product(_lowerCamelCase , repeat=_lowerCamelCase ):
_A = sum(_lowerCamelCase )
totals_frequencies[total] += 1
return totals_frequencies
def a__ ( ) -> int:
_A = total_frequency_distribution(
sides_number=4 , dice_number=9 )
_A = total_frequency_distribution(
sides_number=6 , dice_number=6 )
_A = 0
_A = 9
_A = 4 * 9
_A = 6
for peter_total in range(_lowerCamelCase , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
_A = (4**9) * (6**6)
_A = peter_wins_count / total_games_number
_A = round(_lowerCamelCase , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f'''{solution() = }''') | 719 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {"vocab_file": "spiece.model"}
a_ = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
}
}
# TODO(PVP) - this should be removed in Transformers v5
a_ = {
"t5-small": 5_12,
"t5-base": 5_12,
"t5-large": 5_12,
"t5-3b": 5_12,
"t5-11b": 5_12,
}
a_ = "▁"
class snake_case ( _UpperCamelCase):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ['input_ids', 'attention_mask']
def __init__( self : List[str] , a__ : Optional[int] , a__ : Union[str, Any]="</s>" , a__ : Union[str, Any]="<unk>" , a__ : str="<pad>" , a__ : Optional[int]=1_00 , a__ : List[Any]=None , a__ : Optional[Dict[str, Any]] = None , a__ : Any=True , **a__ : Optional[int] , ) -> None:
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
_A = [F"""<extra_id_{i}>""" for i in range(a__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_A = len(set(filter(lambda a__ : bool("extra_id" in str(a__ ) ) , a__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
if legacy:
logger.warning_once(
F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"""
" read the related pull request available at https://github.com/huggingface/transformers/pull/24565" )
_A = legacy
_A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=a__ , unk_token=a__ , pad_token=a__ , extra_ids=a__ , additional_special_tokens=a__ , sp_model_kwargs=self.sp_model_kwargs , legacy=a__ , **a__ , )
_A = vocab_file
_A = extra_ids
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a__ )
@staticmethod
def a_ ( a__ : List[str] , a__ : Optional[int] , a__ : Tuple ) -> Tuple:
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
_A = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
F""" {pretrained_model_name_or_path} automatically truncating your input to"""
F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , a__ , )
return max_model_length
@property
def a_ ( self : List[Any] ) -> Dict:
'''simple docstring'''
return self.sp_model.get_piece_size() + self._extra_ids
def a_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
_A = {self.convert_ids_to_tokens(a__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a_ ( self : Optional[Any] , a__ : List[int] , a__ : Optional[List[int]] = None , a__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(a__ )) + [1]
return ([0] * len(a__ )) + [1] + ([0] * len(a__ )) + [1]
def a_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
return list(
set(filter(lambda a__ : bool(re.search(r"<extra_id_\d+>" , a__ ) ) is not None , self.additional_special_tokens ) ) )
def a_ ( self : str ) -> List[Any]:
'''simple docstring'''
return [self._convert_token_to_id(a__ ) for token in self.get_sentinel_tokens()]
def a_ ( self : List[Any] , a__ : List[int] ) -> List[int]:
'''simple docstring'''
if len(a__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
" eos tokens being added." )
return token_ids
else:
return token_ids + [self.eos_token_id]
def a_ ( self : int , a__ : List[int] , a__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_A = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def a_ ( self : Union[str, Any] , a__ : List[int] , a__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_A = self._add_eos_if_not_present(a__ )
if token_ids_a is None:
return token_ids_a
else:
_A = self._add_eos_if_not_present(a__ )
return token_ids_a + token_ids_a
def __getstate__( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
_A = self.__dict__.copy()
_A = None
return state
def __setstate__( self : int , a__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
_A = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_A = {}
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a_ ( self : int , a__ : "TextInput" , **a__ : List[str] ) -> List[str]:
'''simple docstring'''
if not self.legacy:
_A = SPIECE_UNDERLINE + text.replace(a__ , " " )
return super().tokenize(a__ , **a__ )
def a_ ( self : str , a__ : Dict , **a__ : Optional[int] ) -> Any:
'''simple docstring'''
if not self.legacy:
_A = text.startswith(a__ )
if is_first:
_A = text[1:]
_A = self.sp_model.encode(a__ , out_type=a__ )
if not self.legacy and not is_first and not text.startswith(" " ) and tokens[0].startswith(a__ ):
_A = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def a_ ( self : int , a__ : List[Any] ) -> List[str]:
'''simple docstring'''
if token.startswith("<extra_id_" ):
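# sentinel tokens occupy the top of the vocabulary in reverse order: <extra_id_0> maps to vocab_size - 1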
_A = re.match(r"<extra_id_(\d+)>" , a__ )
_A = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(a__ )
def a_ ( self : Dict , a__ : Union[str, Any] ) -> Any:
'''simple docstring'''
if index < self.sp_model.get_piece_size():
_A = self.sp_model.IdToPiece(a__ )
else:
_A = F"""<extra_id_{self.vocab_size - 1 - index}>"""
return token
def a_ ( self : Optional[int] , a__ : Tuple ) -> List[str]:
'''simple docstring'''
_A = []
_A = ""
_A = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a__ ) + token
_A = True
_A = []
else:
current_sub_tokens.append(a__ )
_A = False
out_string += self.sp_model.decode(a__ )
return out_string.strip()
def a_ ( self : Dict , a__ : str , a__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(a__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_A = os.path.join(
a__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a__ )
elif not os.path.isfile(self.vocab_file ):
with open(a__ , "wb" ) as fi:
_A = self.sp_model.serialized_model_proto()
fi.write(a__ )
return (out_vocab_file,) | 621 | 0 |
"""simple docstring"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
a_ = datasets.utils.logging.get_logger(__name__)
@dataclass
class snake_case ( datasets.BuilderConfig):
__UpperCamelCase = None
__UpperCamelCase = "utf-8"
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = True # deprecated
__UpperCamelCase = None # deprecated
__UpperCamelCase = 10 << 20 # 10MB
__UpperCamelCase = None
class snake_case ( datasets.ArrowBasedBuilder):
__UpperCamelCase = JsonConfig
def a_ ( self : str ) -> Dict:
'''simple docstring'''
if self.config.block_size is not None:
logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead" )
_A = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore." )
if self.config.newlines_in_values is not None:
raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported" )
return datasets.DatasetInfo(features=self.config.features )
def a_ ( self : Union[str, Any] , a__ : str ) -> List[str]:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_A = dl_manager.download_and_extract(self.config.data_files )
if isinstance(A__ , (str, list, tuple) ):
_A = data_files
if isinstance(A__ , A__ ):
_A = [files]
_A = [dl_manager.iter_files(A__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
_A = []
for split_name, files in data_files.items():
if isinstance(A__ , A__ ):
_A = [files]
_A = [dl_manager.iter_files(A__ ) for file in files]
splits.append(datasets.SplitGenerator(name=A__ , gen_kwargs={"files": files} ) )
return splits
def a_ ( self : Union[str, Any] , a__ : List[Any] ) -> Optional[int]:
'''simple docstring'''
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
_A = self.config.features.arrow_schema.field(A__ ).type
_A = pa_table.append_column(A__ , pa.array([None] * len(A__ ) , type=A__ ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
_A = table_cast(A__ , self.config.features.arrow_schema )
return pa_table
def a_ ( self : List[Any] , a__ : int ) -> Tuple:
'''simple docstring'''
for file_idx, file in enumerate(itertools.chain.from_iterable(A__ ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(A__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
_A = json.load(A__ )
# We keep only the field we are interested in
_A = dataset[self.config.field]
# We accept two formats: a list of dicts or a dict of lists
if isinstance(A__ , (list, tuple) ):
_A = set().union(*[row.keys() for row in dataset] )
_A = {col: [row.get(A__ ) for row in dataset] for col in keys}
else:
_A = dataset
_A = pa.Table.from_pydict(A__ )
yield file_idx, self._cast_table(A__ )
# If the file has one json object per line
else:
with open(A__ , "rb" ) as f:
_A = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
_A = max(self.config.chunksize // 32 , 16 << 10 )
_A = (
self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
)
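# stream the file chunk by chunk so large JSON Lines files never need to fit in memory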
while True:
_A = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(A__ )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
_A = batch.decode(self.config.encoding , errors=A__ ).encode("utf-8" )
try:
while True:
try:
_A = paj.read_json(
io.BytesIO(A__ ) , read_options=paj.ReadOptions(block_size=A__ ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(A__ , pa.ArrowInvalid )
and "straddling" not in str(A__ )
or block_size > len(A__ )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"""Batch of {len(A__ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
A__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
_A = json.load(A__ )
except json.JSONDecodeError:
logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(A__ , A__ ): # list is the only sequence type supported in JSON
try:
_A = set().union(*[row.keys() for row in dataset] )
_A = {col: [row.get(A__ ) for row in dataset] for col in keys}
_A = pa.Table.from_pydict(A__ )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(A__ )
break
else:
logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
raise ValueError(
F"""Not able to read records in the JSON file at {file}. """
F"""You should probably indicate the field of the JSON file containing your records. """
F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(A__ )
batch_idx += 1 | 720 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def a__ ( __lowercase ) -> List[Any]:
_A = os.path.join(args.tf_model_dir , "parameters.json" )
_A = json.loads(open(__lowercase ).read() )
if not params:
raise ValueError(
f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith(".pt" ):
_A = args.output + ".pt"
_A = OrderedDict()
with tf.device("/CPU:0" ):
_A = tf.train.load_checkpoint(args.tf_model_dir )
_A = reader.get_variable_to_shape_map()
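# walk every variable in the TF checkpoint and translate its name into the PyTorch state dict layout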
for key_name in shapes.keys():
_A = reader.get_tensor(__lowercase ).astype(np.floataa )
if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ):
continue
if key_name.startswith("pasts/" ):
if key_name.startswith("pasts/mlp" ):
_A = int(key_name[9] )
elif key_name.startswith("pasts/out" ):
_A = 8
_A = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/moe" ):
_A = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/switch_gating/kernel" ):
_A = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.endswith("/softmlp/kernel" ):
_A = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
_A = key_name[-9:-7]
for i in range(16 ):
_A = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
_A = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/mlp" ):
_A = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/p1/kernel" ):
_A = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.endswith("/p1/bias" ):
_A = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.endswith("/p2/kernel" ):
_A = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.endswith("/p2/bias" ):
_A = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/ln" ):
_A = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
_A = "model.blocks.%d.feed_forward.norm.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.endswith("/g" ):
_A = "model.blocks.%d.feed_forward.norm.weight" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/att" ):
_A = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/qkv/kernel" ):
_A = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_A = state[:, 0, :, :]
_A = state[:, 1, :, :]
_A = state[:, 2, :, :]
_A = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_A = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_A = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_A = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
_A = torch.tensor(__lowercase )
_A = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
_A = torch.tensor(__lowercase )
_A = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
_A = torch.tensor(__lowercase )
elif key_name.endswith("/o/kernel" ):
_A = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
_A = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/an" ):
_A = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
_A = "model.blocks.%d.self_attn.norm.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.endswith("/g" ):
_A = "model.blocks.%d.self_attn.norm.weight" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif (
key_name.startswith("model/wte" )
or key_name.startswith("model/wpe" )
or key_name.startswith("model/ete" )
):
_A = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
key_name[-3:]
]
_A = "model.%s.weight" % nlayer
_A = vnp.copy() # same in embedded
_A = torch.tensor(__lowercase )
if key_name.startswith("model/wte" ):
_A = "lm_head.weight"
_A = vnp.copy() # same in embedded
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/wob" ):
_A = "final_logits_bias"
_A = vnp.copy() # same in embedded
_A = state.reshape((1, -1) )
_A = torch.tensor(__lowercase )
elif key_name == "model/dense/kernel":
_A = "model.last_project.weight"
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name == "model/dense_1/bias":
_A = "model.last_project.bias"
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
torch.save(__lowercase , args.output )
if __name__ == "__main__":
a_ = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
a_ = parser.parse_args()
convert_tf_gptsan_to_pt(args) | 621 | 0 |
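# Editor's sketch: a hedged invocation of the converter above (the script file
# name is an assumption; the two flags match the argparse definition, paths are placeholders):
# python convert_gptsan_tf_checkpoint_to_pytorch.py --tf_model_dir ./tf_ckpt --output ./pytorch_model.bin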
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
a_ = logging.get_logger(__name__)
class snake_case ( __SCREAMING_SNAKE_CASE):
__UpperCamelCase = 'linear'
__UpperCamelCase = 'cosine'
__UpperCamelCase = 'cosine_with_restarts'
__UpperCamelCase = 'polynomial'
__UpperCamelCase = 'constant'
__UpperCamelCase = 'constant_with_warmup'
__UpperCamelCase = 'piecewise_constant'
def a__ ( __lowercase , __lowercase = -1 ) -> str:
return LambdaLR(lowerCAmelCase__ , lambda __lowercase : 1 , last_epoch=lowerCAmelCase__ )
def a__ ( __lowercase , __lowercase , __lowercase = -1 ) -> Optional[int]:
def lr_lambda(__lowercase ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase__ ) / float(max(1.0 , lowerCAmelCase__ ) )
return 1.0
return LambdaLR(lowerCAmelCase__ , lowerCAmelCase__ , last_epoch=lowerCAmelCase__ )
def a__ ( __lowercase , __lowercase , __lowercase = -1 ) -> Optional[int]:
_A = {}
_A = step_rules.split("," )
for rule_str in rule_list[:-1]:
_A , _A = rule_str.split(":" )
_A = int(lowerCAmelCase__ )
_A = float(lowerCAmelCase__ )
_A = value
_A = float(rule_list[-1] )
def create_rules_function(__lowercase , __lowercase ):
def rule_func(__lowercase ) -> float:
_A = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(lowerCAmelCase__ ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
_A = create_rules_function(lowerCAmelCase__ , lowerCAmelCase__ )
return LambdaLR(lowerCAmelCase__ , lowerCAmelCase__ , last_epoch=lowerCAmelCase__ )
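# Editor's example of the `step_rules` grammar parsed above (the string is
# illustrative, not from this file): "1:10,10:0.1,0.01" yields a multiplier of 10
# for steps < 1, 0.1 for steps in [1, 10), and the trailing 0.01 from step 10 onwards.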
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase=-1 ) -> int:
def lr_lambda(__lowercase ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase__ ) / float(max(1 , lowerCAmelCase__ ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase = 0.5 , __lowercase = -1 ) -> Tuple:
def lr_lambda(__lowercase ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase__ ) / float(max(1 , lowerCAmelCase__ ) )
_A = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(lowerCAmelCase__ ) * 2.0 * progress )) )
return LambdaLR(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase = 1 , __lowercase = -1 ) -> int:
def lr_lambda(__lowercase ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase__ ) / float(max(1 , lowerCAmelCase__ ) )
_A = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(lowerCAmelCase__ ) * progress) % 1.0) )) )
return LambdaLR(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase=1E-7 , __lowercase=1.0 , __lowercase=-1 ) -> Optional[Any]:
_A = optimizer.defaults["lr"]
if not (lr_init > lr_end):
raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(__lowercase ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase__ ) / float(max(1 , lowerCAmelCase__ ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
_A = lr_init - lr_end
_A = num_training_steps - num_warmup_steps
_A = 1 - (current_step - num_warmup_steps) / decay_steps
_A = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
a_ = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def a__ ( __lowercase , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = 1 , __lowercase = 1.0 , __lowercase = -1 , ) -> Union[str, Any]:
_A = SchedulerType(lowerCAmelCase__ )
_A = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(lowerCAmelCase__ , last_epoch=lowerCAmelCase__ )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(lowerCAmelCase__ , step_rules=lowerCAmelCase__ , last_epoch=lowerCAmelCase__ )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(lowerCAmelCase__ , num_warmup_steps=lowerCAmelCase__ , last_epoch=lowerCAmelCase__ )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
lowerCAmelCase__ , num_warmup_steps=lowerCAmelCase__ , num_training_steps=lowerCAmelCase__ , num_cycles=lowerCAmelCase__ , last_epoch=lowerCAmelCase__ , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
lowerCAmelCase__ , num_warmup_steps=lowerCAmelCase__ , num_training_steps=lowerCAmelCase__ , power=lowerCAmelCase__ , last_epoch=lowerCAmelCase__ , )
return schedule_func(
lowerCAmelCase__ , num_warmup_steps=lowerCAmelCase__ , num_training_steps=lowerCAmelCase__ , last_epoch=lowerCAmelCase__ ) | 721 |
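# Hedged usage sketch for the factory above (assumes the obfuscated `a__` is the
# usual `get_scheduler` helper and that torch is installed; the model, optimizer
# and step counts below are illustrative, not from this file):
#
# import torch
# model = torch.nn.Linear(4, 2)
# optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
# scheduler = get_scheduler("linear", optimizer, num_warmup_steps=10, num_training_steps=100)
# for _ in range(100):
#     optimizer.step()
#     scheduler.step()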
"""simple docstring"""
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
a_ = argparse.ArgumentParser(
description=(
"Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
" Distillation"
)
)
parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
parser.add_argument("--model_name", default="roberta-large", type=str)
parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
parser.add_argument("--vocab_transform", action="store_true")
a_ = parser.parse_args()
if args.model_type == "roberta":
a_ = RobertaForMaskedLM.from_pretrained(args.model_name)
a_ = "roberta"
elif args.model_type == "gpt2":
a_ = GPTaLMHeadModel.from_pretrained(args.model_name)
a_ = "transformer"
a_ = model.state_dict()
a_ = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
a_ = state_dict[f'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
a_ = f'''{prefix}.embeddings.{w}.weight'''
a_ = state_dict[param_name]
for w in ["weight", "bias"]:
a_ = f'''{prefix}.embeddings.LayerNorm.{w}'''
a_ = state_dict[param_name]
# Transformer Blocks #
a_ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
a_ = state_dict[
f'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
]
a_ = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
a_ = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
]
std_idx += 1
# Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
a_ = state_dict[f'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
a_ = state_dict[f'''lm_head.dense.{w}''']
a_ = state_dict[f'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
a_ = state_dict[f'''{prefix}.ln_f.{w}''']
a_ = state_dict["lm_head.weight"]
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint) | 621 | 0 |
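# Editor's note: with teacher layers [0, 2, 4, 7, 9, 11] selected above, std_idx ends
# at 6, i.e. the dumped checkpoint describes a 6-layer student whose layer i is
# initialised from teacher layer [0, 2, 4, 7, 9, 11][i].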
"""simple docstring"""
class snake_case :
def __init__( self : List[Any] , a__ : str , a__ : str , a__ : Dict ) -> int:
'''simple docstring'''
_A = name
_A = value
_A = weight
def __repr__( self : Dict ) -> str:
'''simple docstring'''
return F"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""
def a_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
return self.value
def a_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
return self.name
def a_ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
return self.weight
def a_ ( self : int ) -> Optional[int]:
'''simple docstring'''
return self.value / self.weight
def a__ ( __lowercase , __lowercase , __lowercase ) -> List[str]:
_A = []
for i in range(len(_lowerCamelCase ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def a__ ( __lowercase , __lowercase , __lowercase ) -> int:
_A = sorted(_lowerCamelCase , key=_lowerCamelCase , reverse=_lowerCamelCase )
_A = []
_A , _A = 0.0, 0.0
for i in range(len(_lowerCamelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def a__ ( ) -> str:
pass
if __name__ == "__main__":
import doctest
doctest.testmod() | 700 |
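# Hedged usage sketch for the greedy knapsack above (assumes the obfuscated helpers
# correspond to the original `build_menu`/`greedy` functions and that `Things.get_value`
# is the sort key; the menu values are made up):
#
# food = ["Burger", "Pizza", "Salad"]
# value = [80.0, 100.0, 30.0]
# weight = [40.0, 60.0, 10.0]
# menu = build_menu(food, value, weight)
# foods, total_value = greedy(menu, 100.0, Things.get_value)  # -> total_value == 180.0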
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 621 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
a_ = logging.get_logger(__name__)
class snake_case ( snake_case_):
def __init__( self : str , *a__ : Tuple , **a__ : List[Any] ) -> str:
'''simple docstring'''
warnings.warn(
"The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use OwlViTImageProcessor instead." , a__ , )
super().__init__(*a__ , **a__ )
| 701 |
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class snake_case ( _UpperCamelCase):
def __init__( self : Optional[int] , a__ : str=0.0_1 , a__ : str=10_00 ) -> int:
'''simple docstring'''
_A = p_stop
_A = max_length
def __iter__( self : Any ) -> Optional[Any]:
'''simple docstring'''
_A = 0
_A = False
while not stop and count < self.max_length:
yield count
count += 1
_A = random.random() < self.p_stop
class snake_case ( unittest.TestCase):
def a_ ( self : List[Any] , a__ : Union[str, Any] , a__ : Union[str, Any] , a__ : List[str]=False , a__ : str=True ) -> Union[str, Any]:
'''simple docstring'''
_A = [
BatchSamplerShard(a__ , 2 , a__ , split_batches=a__ , even_batches=a__ )
for i in range(2 )
]
_A = [list(a__ ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(a__ ) for shard in batch_sampler_shards] , [len(a__ ) for e in expected] )
self.assertListEqual(a__ , a__ )
def a_ ( self : List[Any] ) -> str:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is not a round multiple of batch size but the
# number of batches is a multiple of num_processes.
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is not a round multiple of batch size and the
# number of batches is not a multiple of num_processes either.
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ )
def a_ ( self : int ) -> int:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size.
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
def a_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size but the
# number of batches is a multiple of num_processes.
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size and the
# number of batches is not a multiple of num_processes either.
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[[0, 1]], []]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
def a_ ( self : List[str] ) -> str:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size.
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[[0, 1]], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
def a_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
_A = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
_A = [BatchSamplerShard(a__ , 2 , a__ , even_batches=a__ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def a_ ( self : Optional[int] , a__ : Optional[int] , a__ : Tuple , a__ : Optional[int] , a__ : Union[str, Any]=False , a__ : int=2 , a__ : List[Any]=False ) -> str:
'''simple docstring'''
random.seed(a__ )
_A = list(a__ )
_A = [
IterableDatasetShard(
a__ , batch_size=a__ , drop_last=a__ , num_processes=a__ , process_index=a__ , split_batches=a__ , )
for i in range(a__ )
]
_A = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(a__ )
iterable_dataset_lists.append(list(a__ ) )
_A = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shards should have the same length, a round multiple of shard_batch_size
_A = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(a__ ) , len(a__ ) )
self.assertTrue(len(a__ ) % shard_batch_size == 0 )
_A = []
for idx in range(0 , len(a__ ) , a__ ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(a__ ) < len(a__ ):
reference += reference
self.assertListEqual(a__ , reference[: len(a__ )] )
def a_ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
_A = 42
_A = RandomIterableDataset()
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
# Edge case with a very small dataset
_A = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
def a_ ( self : List[str] ) -> Dict:
'''simple docstring'''
_A = BatchSampler(range(16 ) , batch_size=4 , drop_last=a__ )
_A = SkipBatchSampler(a__ , 2 )
self.assertListEqual(list(a__ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a_ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_A = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a_ ( self : int ) -> Optional[int]:
'''simple docstring'''
_A = DataLoader(list(range(16 ) ) , batch_size=4 )
_A = skip_first_batches(a__ , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a_ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_A = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def a_ ( self : int ) -> int:
'''simple docstring'''
Accelerator()
_A = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) | 621 | 0 |
"""simple docstring"""
from collections import defaultdict
class snake_case :
def __init__( self : Tuple , a__ : Union[str, Any] , a__ : str ) -> int:
'''simple docstring'''
_A = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
_A = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(lowercase__ ) )
]
_A = defaultdict(lowercase__ ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
_A = (1 << len(lowercase__ )) - 1
def a_ ( self : Any , a__ : int , a__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
# Number of ways when we don't assign this task in the arrangement
_A = self.count_ways_until(lowercase__ , task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
_A = total_ways_util
return self.dp[mask][task_no]
def a_ ( self : Tuple , a__ : str ) -> Union[str, Any]:
'''simple docstring'''
for i in range(len(lowercase__ ) ):
for j in task_performed[i]:
self.task[j].append(lowercase__ )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
a_ = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
a_ = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
) | 702 |
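# Editor's note on the example above: person p is bit p of `mask`, so with 3 persons
# the final mask is 0b111 == 7. For tasks [[1, 3, 4], [1, 2, 5], [3, 4]] (person 0 can
# do tasks 1, 3 or 4, and so on) the printed number of complete assignments is 10.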
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
a_ = False
class snake_case ( unittest.TestCase):
pass
@nightly
@require_torch_gpu
class snake_case ( unittest.TestCase):
def a_ ( self : Optional[int] ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : Tuple ) -> Any:
'''simple docstring'''
_A = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
_A = torch.manual_seed(0 )
_A = pipe.dual_guided(
prompt="first prompt" , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a__ )
_A = VersatileDiffusionPipeline.from_pretrained(a__ , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_A = generator.manual_seed(0 )
_A = pipe.dual_guided(
prompt="first prompt" , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def a_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
_A = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_A = "cyberpunk 2077"
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
_A = torch.manual_seed(0 )
_A = pipe.dual_guided(
prompt=a__ , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
_A = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_A = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_A = "A painting of a squirrel eating a burger "
_A = torch.manual_seed(0 )
_A = pipe.text_to_image(
prompt=a__ , generator=a__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
_A = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_A = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_A = pipe.image_variation(a__ , generator=a__ , output_type="numpy" ).images
_A = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_A = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 | 621 | 0 |
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def a__ ( __lowercase ) -> int:
return getitem, k
def a__ ( __lowercase , __lowercase ) -> int:
return setitem, k, v
def a__ ( __lowercase ) -> List[str]:
return delitem, k
def a__ ( __lowercase , __lowercase , *__lowercase ) -> Tuple:
try:
return fun(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ), None
except Exception as e:
return None, e
a_ = (
_set("key_a", "val_a"),
_set("key_b", "val_b"),
)
a_ = [
_set("key_a", "val_a"),
_set("key_a", "val_b"),
]
a_ = [
_set("key_a", "val_a"),
_set("key_b", "val_b"),
_del("key_a"),
_del("key_b"),
_set("key_a", "val_a"),
_del("key_a"),
]
a_ = [
_get("key_a"),
_del("key_a"),
_set("key_a", "val_a"),
_del("key_a"),
_del("key_a"),
_get("key_a"),
]
a_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
a_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("key_a", "val_b"),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def a__ ( __lowercase ) -> int:
_A = HashMap(initial_block_size=4 )
_A = {}
for _, (fun, *args) in enumerate(SCREAMING_SNAKE_CASE_ ):
_A = _run_operation(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ )
_A = _run_operation(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ )
assert my_res == py_res
assert str(SCREAMING_SNAKE_CASE_ ) == str(SCREAMING_SNAKE_CASE_ )
assert set(SCREAMING_SNAKE_CASE_ ) == set(SCREAMING_SNAKE_CASE_ )
assert len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ )
assert set(my.items() ) == set(py.items() )
def a__ ( ) -> Optional[int]:
def is_public(__lowercase ) -> bool:
return not name.startswith("_" )
_A = {name for name in dir({} ) if is_public(SCREAMING_SNAKE_CASE_ )}
_A = {name for name in dir(HashMap() ) if is_public(SCREAMING_SNAKE_CASE_ )}
assert dict_public_names > hash_public_names | 703 |
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
a_ = logging.get_logger(__name__)
@dataclass
class snake_case :
__UpperCamelCase = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys())})
__UpperCamelCase = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'})
__UpperCamelCase = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__UpperCamelCase = field(
default=_UpperCamelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'})
def a_ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
_A = self.task_name.lower()
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'train'
__UpperCamelCase = 'dev'
__UpperCamelCase = 'test'
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
def __init__( self : Optional[int] , a__ : GlueDataTrainingArguments , a__ : PreTrainedTokenizerBase , a__ : Optional[int] = None , a__ : Union[str, Split] = Split.train , a__ : Optional[str] = None , ) -> Tuple:
'''simple docstring'''
warnings.warn(
"This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
"library. You can have a look at this example script for pointers: "
"https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" , a__ , )
_A = args
_A = glue_processors[args.task_name]()
_A = glue_output_modes[args.task_name]
if isinstance(a__ , a__ ):
try:
_A = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
# Load data features from cache or dataset file
_A = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
_A = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_A , _A = label_list[2], label_list[1]
_A = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_A = cached_features_file + ".lock"
with FileLock(a__ ):
if os.path.exists(a__ ) and not args.overwrite_cache:
_A = time.time()
_A = torch.load(a__ )
logger.info(
F"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
else:
logger.info(F"""Creating features from dataset file at {args.data_dir}""" )
if mode == Split.dev:
_A = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
_A = self.processor.get_test_examples(args.data_dir )
else:
_A = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
_A = examples[:limit_length]
_A = glue_convert_examples_to_features(
a__ , a__ , max_length=args.max_seq_length , label_list=a__ , output_mode=self.output_mode , )
_A = time.time()
torch.save(self.features , a__ )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self : List[Any] ) -> Any:
'''simple docstring'''
return len(self.features )
def __getitem__( self : Tuple , a__ : Union[str, Any] ) -> InputFeatures:
'''simple docstring'''
return self.features[i]
def a_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
return self.label_list | 621 | 0 |
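# Hedged usage sketch (assumes the obfuscated classes correspond to transformers'
# GlueDataTrainingArguments / GlueDataset; the task name and data path are placeholders):
#
# from transformers import AutoTokenizer
# args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
# tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# train_dataset = GlueDataset(args, tokenizer=tokenizer, mode=Split.train)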
"""simple docstring"""
def a__ ( __lowercase , __lowercase ) -> Optional[Any]:
_A = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def a__ ( __lowercase , __lowercase , __lowercase ) -> List[Any]:
_A = 0
while b > 0:
if b & 1:
_A = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res | 704 |
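# Editor's examples for the two doubling helpers above (the names `multiply` and
# `multiply_mod` are assumed from the original source; here both are the obfuscated `a__`):
# multiply(5, 7) -> 35
# multiply_mod(5, 7, 6) -> 5, i.e. (5 * 7) % 6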
"""simple docstring"""
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase ) -> str:
# BFS: return True if the sink is reachable from the source in the residual graph.
_A = [False] * len(__lowercase )
_A = []
queue.append(__lowercase )
_A = True
while queue:
_A = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(__lowercase )
_A = True
_A = u
return visited[t]
def a__ ( __lowercase , __lowercase , __lowercase ) -> int:
# This array is filled by BFS to store the augmenting path
_A = [-1] * (len(__lowercase ))
_A = 0
while bfs(__lowercase , __lowercase , __lowercase , __lowercase ):
_A = float("Inf" )
_A = sink
while s != source:
# Find the minimum value in select path
_A = min(__lowercase , graph[parent[s]][s] )
_A = parent[s]
max_flow += path_flow
_A = sink
while v != source:
_A = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
_A = parent[v]
return max_flow
a_ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
a_ , a_ = 0, 5
print(ford_fulkerson(graph, source, sink)) | 621 | 0 |
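# Editor's note: because augmenting paths are found with BFS, this is the
# Edmonds-Karp variant of Ford-Fulkerson, which runs in O(V * E^2). For the classic
# six-node network above, the expected printed max flow is 23.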
"""simple docstring"""
def a__ ( __lowercase , __lowercase ) -> Union[str, Any]:
_A = [[] for _ in range(__snake_case )]
_A = key - 1
if key <= 0:
raise ValueError("Height of grid can\'t be 0 or negative" )
if key == 1 or len(__snake_case ) <= key:
return input_string
for position, character in enumerate(__snake_case ):
_A = position % (lowest * 2) # puts it in bounds
_A = min(__snake_case , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(__snake_case )
_A = ["".join(__snake_case ) for row in temp_grid]
_A = "".join(__snake_case )
return output_string
def a__ ( __lowercase , __lowercase ) -> Dict:
_A = []
_A = key - 1
if key <= 0:
raise ValueError("Height of grid can\'t be 0 or negative" )
if key == 1:
return input_string
_A = [[] for _ in range(__snake_case )] # generates template
for position in range(len(__snake_case ) ):
_A = position % (lowest * 2) # puts it in bounds
_A = min(__snake_case , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append("*" )
_A = 0
for row in temp_grid: # fills in the characters
_A = input_string[counter : counter + len(__snake_case )]
grid.append(list(__snake_case ) )
counter += len(__snake_case )
_A = "" # reads as zigzag
for position in range(len(__snake_case ) ):
_A = position % (lowest * 2) # puts it in bounds
_A = min(__snake_case , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def a__ ( __lowercase ) -> str:
_A = {}
for key_guess in range(1 , len(__snake_case ) ): # tries every key
_A[key_guess] = decrypt(__snake_case , key_guess )  # store the candidate plaintext for this key
return _A
if __name__ == "__main__":
import doctest
doctest.testmod()
| 705 |
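# Worked example for the rail-fence helpers above (the classic Wikipedia test vector,
# added as an editor's sketch; `encrypt`/`decrypt` are the assumed original names
# behind the obfuscated `a__` helpers):
# encrypt("WEAREDISCOVEREDFLEEATONCE", 3) -> "WECRLTEERDSOEEFEAOCAIVDEN"
# decrypt("WECRLTEERDSOEEFEAOCAIVDEN", 3) -> "WEAREDISCOVEREDFLEEATONCE"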
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
a_ = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def a__ ( __lowercase , __lowercase , __lowercase ) -> List[str]:
_A = state_dict.pop(__lowercase )
_A = val
def a__ ( __lowercase ) -> List[str]:
_A = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
_A = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
_A = value
else:
_A = value
return new_state_dict
def a__ ( __lowercase , __lowercase=False ) -> Any:
_A = ""
if is_panoptic:
_A = "conditional_detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_A = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
_A = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_A = in_proj_weight[:256, :]
_A = in_proj_bias[:256]
_A = in_proj_weight[256:512, :]
_A = in_proj_bias[256:512]
_A = in_proj_weight[-256:, :]
_A = in_proj_bias[-256:]
def a__ ( ) -> int:
_A = "http://images.cocodataset.org/val2017/000000039769.jpg"
_A = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
return im
@torch.no_grad()
def a__ ( __lowercase , __lowercase ) -> Any:
_A = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
_A = "resnet101"
if "dc5" in model_name:
_A = True
_A = "panoptic" in model_name
if is_panoptic:
_A = 250
else:
_A = 91
_A = "huggingface/label-files"
_A = "coco-detection-id2label.json"
_A = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) )
_A = {int(__lowercase ): v for k, v in idalabel.items()}
_A = idalabel
_A = {v: k for k, v in idalabel.items()}
# load image processor
_A = "coco_panoptic" if is_panoptic else "coco_detection"
_A = ConditionalDetrImageProcessor(format=__lowercase )
# prepare image
_A = prepare_img()
_A = image_processor(images=__lowercase , return_tensors="pt" )
_A = encoding["pixel_values"]
logger.info(f"""Converting model {model_name}...""" )
# load original model from torch hub
_A = torch.hub.load("DeppMeng/ConditionalDETR" , __lowercase , pretrained=__lowercase ).eval()
_A = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
_A = "conditional_detr." + src
rename_key(__lowercase , __lowercase , __lowercase )
_A = rename_backbone_keys(__lowercase )
# query, key and value matrices need special treatment
read_in_q_k_v(__lowercase , is_panoptic=__lowercase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_A = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
_A = state_dict.pop(__lowercase )
_A = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_A = state_dict.pop(__lowercase )
_A = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
_A = state_dict.pop(__lowercase )
_A = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
_A = state_dict.pop(__lowercase )
_A = val
# finally, create HuggingFace model and load state dict
_A = ConditionalDetrForSegmentation(__lowercase ) if is_panoptic else ConditionalDetrForObjectDetection(__lowercase )
model.load_state_dict(__lowercase )
model.eval()
model.push_to_hub(repo_id=__lowercase , organization="DepuMeng" , commit_message="Add model" )
# verify our conversion
_A = conditional_detr(__lowercase )
_A = model(__lowercase )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(__lowercase ).mkdir(exist_ok=__lowercase )
model.save_pretrained(__lowercase )
image_processor.save_pretrained(__lowercase )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
a_ = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path) | 621 | 0 |
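# Hedged invocation sketch (the script file name is an assumption; the flags match
# the argparse definition above):
# python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#     --model_name conditional_detr_resnet50 --pytorch_dump_folder_path ./conditional_detr_resnet50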
"""simple docstring"""
from __future__ import annotations
def a__ ( __lowercase ) -> Union[str, Any]:
if len(lowerCAmelCase__ ) < 2:
raise ValueError("Monogons and Digons are not polygons in the Euclidean space" )
if any(i <= 0 for i in nums ):
raise ValueError("All values must be greater than 0" )
_A = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 706 |
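# A couple of hedged examples for the triangle-inequality check above (the name
# `check_polygon` is assumed from the original source; here it is the obfuscated `a__`):
# check_polygon([6, 10, 5]) -> True, since the longest side 10 < 6 + 5
# check_polygon([3, 7, 13, 2]) -> False, since 13 >= 3 + 7 + 2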
"""simple docstring"""
import random
def a__ ( __lowercase , __lowercase , __lowercase ) -> Optional[Any]:
_A = a[left_index]
_A = left_index + 1
for j in range(left_index + 1 , __lowercase ):
if a[j] < pivot:
_A , _A = a[i], a[j]
i += 1
_A , _A = a[i - 1], a[left_index]
return i - 1
def a__ ( __lowercase , __lowercase , __lowercase ) -> int:
if left < right:
_A = random.randint(__lowercase , right - 1 )
_A , _A = (
a[left],
a[pivot],
) # switches the pivot with the left most bound
_A = partition(__lowercase , __lowercase , __lowercase )
quick_sort_random(
__lowercase , __lowercase , __lowercase ) # recursive quicksort to the left of the pivot point
quick_sort_random(
__lowercase , pivot_index + 1 , __lowercase ) # recursive quicksort to the right of the pivot point
def a__ ( ) -> Dict:
_A = input("Enter numbers separated by a comma:\n" ).strip()
_A = [int(__lowercase ) for item in user_input.split("," )]
quick_sort_random(__lowercase , 0 , len(__lowercase ) )
print(__lowercase )
if __name__ == "__main__":
main() | 621 | 0 |
"""simple docstring"""
from math import pi
def a__ ( __lowercase , __lowercase ) -> Union[str, Any]:
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10)) | 707 |
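# Sanity check (editor's note): a 90-degree arc of a circle of radius 10 has length
# 2 * pi * 10 * (90 / 360) = 5 * pi, roughly 15.708, which is what the call above prints.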
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
a_ = logging.get_logger(__name__)
class snake_case ( _UpperCamelCase):
__UpperCamelCase = ['input_features']
def __init__( self : int , a__ : Optional[Any]=80 , a__ : Optional[int]=1_60_00 , a__ : int=1_60 , a__ : Union[str, Any]=30 , a__ : Tuple=4_00 , a__ : List[Any]=0.0 , a__ : Optional[Any]=False , **a__ : List[Any] , ) -> str:
'''simple docstring'''
super().__init__(
feature_size=a__ , sampling_rate=a__ , padding_value=a__ , return_attention_mask=a__ , **a__ , )
_A = n_fft
_A = hop_length
_A = chunk_length
_A = chunk_length * sampling_rate
_A = self.n_samples // hop_length
_A = sampling_rate
_A = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=a__ , min_frequency=0.0 , max_frequency=8_0_0_0.0 , sampling_rate=a__ , norm="slaney" , mel_scale="slaney" , )
def a_ ( self : int , a__ : np.array ) -> np.ndarray:
'''simple docstring'''
_A = spectrogram(
a__ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , )
_A = log_spec[:, :-1]
_A = np.maximum(a__ , log_spec.max() - 8.0 )
_A = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def a_ ( a__ : List[np.ndarray] , a__ : List[np.ndarray] , a__ : float = 0.0 ) -> List[np.ndarray]:
'''simple docstring'''
if attention_mask is not None:
_A = np.array(a__ , np.intaa )
_A = []
for vector, length in zip(a__ , attention_mask.sum(-1 ) ):
_A = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
_A = padding_value
normed_input_values.append(a__ )
else:
_A = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self : Optional[int] , a__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a__ : bool = True , a__ : Optional[int] = None , a__ : Optional[Union[str, TensorType]] = None , a__ : Optional[bool] = None , a__ : Optional[str] = "max_length" , a__ : Optional[int] = None , a__ : Optional[int] = None , a__ : Optional[bool] = None , **a__ : Dict , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_A = isinstance(a__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
_A = is_batched_numpy or (
isinstance(a__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_A = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(a__ , np.ndarray ):
_A = np.asarray(a__ , dtype=np.floataa )
elif isinstance(a__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_A = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_A = [np.asarray([raw_speech] ).T]
_A = BatchFeature({"input_features": raw_speech} )
# convert into correct format for padding
_A = self.pad(
a__ , padding=a__ , max_length=max_length if max_length else self.n_samples , truncation=a__ , pad_to_multiple_of=a__ , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
_A = self.zero_mean_unit_var_norm(
padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
_A = np.stack(padded_inputs["input_features"] , axis=0 )
# make sure list is in array format
_A = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 )
_A = [self._np_extract_fbank_features(a__ ) for waveform in input_features[0]]
if isinstance(input_features[0] , a__ ):
_A = [np.asarray(a__ , dtype=np.floataa ) for feature in input_features]
else:
_A = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
_A = padded_inputs["attention_mask"][:, :: self.hop_length]
if return_tensors is not None:
_A = padded_inputs.convert_to_tensors(a__ )
return padded_inputs
def a_ ( self : Dict ) -> Dict[str, Any]:
'''simple docstring'''
_A = copy.deepcopy(self.__dict__ )
_A = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output | 621 | 0 |
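# The `_np_extract_fbank_features` step above post-processes the log-mel
# spectrogram Whisper-style: drop the last frame, clamp the dynamic range to
# 8 log10-units below the loudest bin, then shift and scale into roughly
# [-1, 1]. A sketch of just that clamping/rescaling, assuming a precomputed
# `log_spec` array:
import numpy as np

def normalize_log_mel(log_spec: np.ndarray) -> np.ndarray:
    log_spec = np.maximum(log_spec, log_spec.max() - 8.0)  # clamp quiet bins
    return (log_spec + 4.0) / 4.0                          # rescale

spec = np.array([[-20.0, -3.0], [-1.0, 0.0]])
out = normalize_log_mel(spec)
assert out.min() == -1.0 and out.max() == 1.0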
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import List, Optional
class snake_case ( _UpperCamelCase):
def __init__( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
self.test()
def a_ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
_A = 0
_A = False
while not completed:
if counter == 1:
self.reset()
_A = self.advance()
if not self.does_advance(a__ ):
raise Exception(
"Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true." )
_A , _A , _A = self.update(a__ )
counter += 1
if counter > 1_00_00:
raise Exception("update() does not fulfill the constraint." )
if self.remaining() != 0:
raise Exception("Custom Constraint is not defined correctly." )
@abstractmethod
def a_ ( self : Any ) -> List[str]:
'''simple docstring'''
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def a_ ( self : Tuple , a__ : str ) -> Tuple:
'''simple docstring'''
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def a_ ( self : List[Any] , a__ : Optional[int] ) -> str:
'''simple docstring'''
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def a_ ( self : str ) -> str:
'''simple docstring'''
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def a_ ( self : int ) -> str:
'''simple docstring'''
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def a_ ( self : Optional[int] , a__ : Optional[int]=False ) -> List[Any]:
'''simple docstring'''
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class snake_case ( _UpperCamelCase):
def __init__( self : str , a__ : Optional[Any] ) -> List[Any]:
'''simple docstring'''
super(a__ , self ).__init__()
if not isinstance(a__ , a__ ) or len(a__ ) == 0:
raise ValueError(F"""`token_ids` has to be a non-empty list, but is {token_ids}.""" )
if any((not isinstance(a__ , a__ ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""" )
_A = token_ids
_A = len(self.token_ids )
_A = -1 # the index of the currently fulfilled step
_A = False
def a_ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def a_ ( self : Any , a__ : str ) -> Tuple:
'''simple docstring'''
if not isinstance(a__ , a__ ):
raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(a__ )}""" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def a_ ( self : Dict , a__ : List[str] ) -> Dict:
'''simple docstring'''
if not isinstance(a__ , a__ ):
raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(a__ )}""" )
_A = False
_A = False
_A = False
if self.does_advance(a__ ):
self.fulfilled_idx += 1
_A = True
if self.fulfilled_idx == (self.seqlen - 1):
_A = True
_A = completed
else:
# failed to make progress.
_A = True
self.reset()
return stepped, completed, reset
def a_ ( self : List[str] ) -> str:
'''simple docstring'''
_A = False
_A = 0
def a_ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
return self.seqlen - (self.fulfilled_idx + 1)
def a_ ( self : Dict , a__ : str=False ) -> int:
'''simple docstring'''
_A = PhrasalConstraint(self.token_ids )
if stateful:
_A = self.seqlen
_A = self.fulfilled_idx
_A = self.completed
return new_constraint
class snake_case :
def __init__( self : Optional[int] , a__ : str , a__ : Dict=True ) -> Any:
'''simple docstring'''
_A = max([len(a__ ) for one in nested_token_ids] )
_A = {}
for token_ids in nested_token_ids:
_A = root
for tidx, token_id in enumerate(a__ ):
if token_id not in level:
_A = {}
_A = level[token_id]
if no_subsets and self.has_subsets(a__ , a__ ):
raise ValueError(
"Each list in `nested_token_ids` can't be a complete subset of another list, but is"
F""" {nested_token_ids}.""" )
_A = root
def a_ ( self : Dict , a__ : Optional[int] ) -> int:
'''simple docstring'''
_A = self.trie
for current_token in current_seq:
_A = start[current_token]
_A = list(start.keys() )
return next_tokens
def a_ ( self : Optional[int] , a__ : List[str] ) -> Dict:
'''simple docstring'''
_A = self.next_tokens(a__ )
return len(a__ ) == 0
def a_ ( self : Union[str, Any] , a__ : Dict ) -> Any:
'''simple docstring'''
_A = list(root.values() )
if len(a__ ) == 0:
return 1
else:
return sum([self.count_leaves(a__ ) for nn in next_nodes] )
def a_ ( self : int , a__ : str , a__ : List[str] ) -> Dict:
'''simple docstring'''
_A = self.count_leaves(a__ )
return len(a__ ) != leaf_count
class snake_case ( _UpperCamelCase):
def __init__( self : List[Any] , a__ : Tuple ) -> int:
'''simple docstring'''
super(a__ , self ).__init__()
if not isinstance(a__ , a__ ) or len(a__ ) == 0:
raise ValueError(F"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""" )
if any(not isinstance(a__ , a__ ) for token_ids in nested_token_ids ):
raise ValueError(F"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""" )
if any(
any((not isinstance(a__ , a__ ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""" )
_A = DisjunctiveTrie(a__ )
_A = nested_token_ids
_A = self.trie.max_height
_A = []
_A = False
def a_ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
_A = self.trie.next_tokens(self.current_seq )
if len(a__ ) == 0:
return None
else:
return token_list
def a_ ( self : Any , a__ : str ) -> str:
'''simple docstring'''
if not isinstance(a__ , a__ ):
raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(a__ )}""" )
_A = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def a_ ( self : List[str] , a__ : Any ) -> List[str]:
'''simple docstring'''
if not isinstance(a__ , a__ ):
raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(a__ )}""" )
_A = False
_A = False
_A = False
if self.does_advance(a__ ):
self.current_seq.append(a__ )
_A = True
else:
_A = True
self.reset()
_A = self.trie.reached_leaf(self.current_seq )
_A = completed
return stepped, completed, reset
def a_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
_A = False
_A = []
def a_ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def a_ ( self : List[Any] , a__ : Any=False ) -> Union[str, Any]:
'''simple docstring'''
_A = DisjunctiveConstraint(self.token_ids )
if stateful:
_A = self.seqlen
_A = self.current_seq
_A = self.completed
return new_constraint
class snake_case :
def __init__( self : Union[str, Any] , a__ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_A = constraints
# max # of steps required to fulfill a given constraint
_A = max([c.seqlen for c in constraints] )
_A = len(a__ )
_A = False
self.init_state()
def a_ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
_A = []
_A = None
_A = [constraint.copy(stateful=a__ ) for constraint in self.constraints]
def a_ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
_A = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def a_ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_A = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
_A = constraint.advance()
if isinstance(a__ , a__ ):
token_list.append(a__ )
elif isinstance(a__ , a__ ):
token_list.extend(a__ )
else:
_A = self.inprogress_constraint.advance()
if isinstance(a__ , a__ ):
token_list.append(a__ )
elif isinstance(a__ , a__ ):
token_list.extend(a__ )
if len(a__ ) == 0:
return None
else:
return token_list
def a_ ( self : int , a__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
_A , _A = self.add(a__ )
# the entire list of constraints are fulfilled
if self.completed:
break
def a_ ( self : Optional[int] , a__ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
if not isinstance(a__ , a__ ):
raise ValueError(F"""`token_id` should be an `int`, but is `{token_id}`.""" )
_A , _A = False, False
if self.completed:
_A = True
_A = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the current
# job, simply update the state
_A , _A , _A = self.inprogress_constraint.update(a__ )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=a__ ) )
_A = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
_A = None
if len(self.pending_constraints ) == 0:
# we're done!
_A = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(a__ ):
_A , _A , _A = pending_constraint.update(a__ )
if not stepped:
raise Exception(
"`constraint.update(token_id)` is not yielding incremental progress, "
"even though `constraint.does_advance(token_id)` is true." )
if complete:
self.complete_constraints.append(a__ )
_A = None
if not complete and stepped:
_A = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
_A = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
_A = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def a_ ( self : Any , a__ : Tuple=True ) -> List[Any]:
'''simple docstring'''
_A = ConstraintListState(self.constraints ) # we actually never touch self.constraints objects
# throughout this process. So it's at initialization state.
if stateful:
_A = [
constraint.copy(stateful=a__ ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
_A = self.inprogress_constraint.copy(stateful=a__ )
_A = [constraint.copy() for constraint in self.pending_constraints]
return new_state
| 708 |
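# The disjunctive-trie class above stores alternative token sequences and
# answers "which token ids may come next given the prefix so far"; a
# constraint is fulfilled once a leaf is reached. A minimal sketch of that
# trie logic with plain (assumed) names:
class Trie:
    def __init__(self, nested_token_ids):
        self.root = {}
        for token_ids in nested_token_ids:
            level = self.root
            for token_id in token_ids:
                level = level.setdefault(token_id, {})

    def next_tokens(self, current_seq):
        level = self.root
        for token in current_seq:
            level = level[token]
        return list(level.keys())

    def reached_leaf(self, current_seq):
        return len(self.next_tokens(current_seq)) == 0

t = Trie([[1, 2, 3], [1, 4]])
assert sorted(t.next_tokens([1])) == [2, 4]
assert t.reached_leaf([1, 4]) and not t.reached_leaf([1, 2])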
"""simple docstring"""
from __future__ import annotations
def a__ ( __lowercase , __lowercase ) -> float:
_A = sorted(numsa + numsa )
_A , _A = divmod(len(__lowercase ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ = [float(x) for x in input("Enter the elements of first array: ").split()]
a_ = [float(x) for x in input("Enter the elements of second array: ").split()]
print(f'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''') | 621 | 0 |
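# Sanity check for the two-array median above (the `median_of_two_arrays`
# name comes from its own call site; consistent naming is assumed): merge and
# sort both inputs, then take the middle element, or the mean of the two
# middle elements when the combined length is even.
def median_of_two_arrays(numsa, numsb):
    all_numbers = sorted(numsa + numsb)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    return (all_numbers[div] + all_numbers[div - 1]) / 2

assert median_of_two_arrays([1, 3], [2]) == 2        # odd combined length
assert median_of_two_arrays([1, 2], [3, 4]) == 2.5   # even combined length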
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class snake_case ( metaclass=lowerCAmelCase__):
__UpperCamelCase = ["torch", "torchsde"]
def __init__( self : Optional[Any] , *a__ : str , **a__ : str ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["torch", "torchsde"] )
@classmethod
def a_ ( cls : Optional[int] , *a__ : Optional[int] , **a__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["torch", "torchsde"] )
@classmethod
def a_ ( cls : Optional[int] , *a__ : Optional[Any] , **a__ : Dict ) -> str:
'''simple docstring'''
requires_backends(cls , ["torch", "torchsde"] ) | 709 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
"Salesforce/blip-vqa-capfit-large": (
"https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-base": (
"https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-large": (
"https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
),
"Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
"Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
"Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
"Salesforce/blip-itm-large-flikr": (
"https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
),
}
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'blip_text_model'
def __init__( self : int , a__ : List[str]=3_05_24 , a__ : List[str]=7_68 , a__ : List[Any]=7_68 , a__ : int=30_72 , a__ : List[str]=7_68 , a__ : Dict=12 , a__ : Optional[int]=8 , a__ : Optional[Any]=5_12 , a__ : List[Any]="gelu" , a__ : Optional[Any]=1E-1_2 , a__ : Any=0.0 , a__ : int=0.0 , a__ : Dict=0.0_2 , a__ : Optional[Any]=3_05_22 , a__ : Any=2 , a__ : int=0 , a__ : Union[str, Any]=1_02 , a__ : Tuple=True , a__ : Optional[int]=True , **a__ : Any , ) -> List[Any]:
'''simple docstring'''
super().__init__(
pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , sep_token_id=a__ , **a__ , )
_A = vocab_size
_A = hidden_size
_A = encoder_hidden_size
_A = intermediate_size
_A = projection_dim
_A = hidden_dropout_prob
_A = num_hidden_layers
_A = num_attention_heads
_A = max_position_embeddings
_A = layer_norm_eps
_A = hidden_act
_A = initializer_range
_A = attention_probs_dropout_prob
_A = is_decoder
_A = use_cache
@classmethod
def a_ ( cls : Optional[Any] , a__ : Union[str, os.PathLike] , **a__ : Optional[Any] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(a__ )
_A , _A = cls.get_config_dict(a__ , **a__ )
# get the text config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
_A = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(a__ , **a__ )
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'blip_vision_model'
def __init__( self : Optional[Any] , a__ : Any=7_68 , a__ : List[str]=30_72 , a__ : str=5_12 , a__ : Any=12 , a__ : int=12 , a__ : int=3_84 , a__ : Tuple=16 , a__ : str="gelu" , a__ : Tuple=1E-5 , a__ : List[str]=0.0 , a__ : List[Any]=1E-1_0 , **a__ : int , ) -> List[str]:
'''simple docstring'''
super().__init__(**a__ )
_A = hidden_size
_A = intermediate_size
_A = projection_dim
_A = num_hidden_layers
_A = num_attention_heads
_A = patch_size
_A = image_size
_A = initializer_range
_A = attention_dropout
_A = layer_norm_eps
_A = hidden_act
@classmethod
def a_ ( cls : Any , a__ : Union[str, os.PathLike] , **a__ : int ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(a__ )
_A , _A = cls.get_config_dict(a__ , **a__ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
_A = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(a__ , **a__ )
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'blip'
__UpperCamelCase = True
def __init__( self : List[Any] , a__ : Optional[int]=None , a__ : str=None , a__ : List[str]=5_12 , a__ : Any=2.6_5_9_2 , a__ : str=2_56 , **a__ : Optional[int] , ) -> Dict:
'''simple docstring'''
super().__init__(**a__ )
if text_config is None:
_A = {}
logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values." )
if vision_config is None:
_A = {}
logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values." )
_A = BlipTextConfig(**a__ )
_A = BlipVisionConfig(**a__ )
_A = self.vision_config.hidden_size
_A = projection_dim
_A = logit_scale_init_value
_A = 1.0
_A = 0.0_2
_A = image_text_hidden_size
@classmethod
def a_ ( cls : Tuple , a__ : BlipTextConfig , a__ : BlipVisionConfig , **a__ : Optional[int] ) -> str:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **a__ )
def a_ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
_A = copy.deepcopy(self.__dict__ )
_A = self.text_config.to_dict()
_A = self.vision_config.to_dict()
_A = self.__class__.model_type
return output | 621 | 0 |
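# The Blip config classes above follow a composite-config pattern: the parent
# config builds text/vision sub-configs from dicts and falls back to
# all-default sub-configs when given `None`. A library-independent sketch of
# the pattern, with hypothetical names and a few illustrative defaults:
class TextCfg:
    def __init__(self, vocab_size=30524, hidden_size=768, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size

class VisionCfg:
    def __init__(self, hidden_size=768, image_size=384, **kwargs):
        self.hidden_size = hidden_size
        self.image_size = image_size

class CompositeCfg:
    def __init__(self, text_config=None, vision_config=None):
        self.text_config = TextCfg(**(text_config or {}))
        self.vision_config = VisionCfg(**(vision_config or {}))

cfg = CompositeCfg(text_config={"vocab_size": 1000})
assert cfg.text_config.vocab_size == 1000
assert cfg.vision_config.image_size == 384  # defaulted sub-config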
"""simple docstring"""
from collections import deque
from .hash_table import HashTable
class snake_case ( _A):
def __init__( self : int , *a__ : Union[str, Any] , **a__ : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
def a_ ( self : Optional[Any] , a__ : Optional[int] , a__ : Any ) -> Dict:
'''simple docstring'''
_A = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(UpperCamelCase__ )
_A = self.values[key]
def a_ ( self : List[str] ) -> int:
'''simple docstring'''
return (
sum(self.charge_factor - len(UpperCamelCase__ ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def a_ ( self : Optional[Any] , a__ : str , a__ : Tuple=None ) -> str:
'''simple docstring'''
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(UpperCamelCase__ ) == 0
):
return key
return super()._collision_resolution(UpperCamelCase__ , UpperCamelCase__ )
| 710 |
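# The subclass above layers deque-based chaining on a base hash table: each
# slot keeps a deque of values (newest on the left), and the balanced factor
# is the average spare capacity per slot. The chaining idea in isolation,
# with assumed names and no base class:
from collections import deque

class ChainedSlots:
    def __init__(self, size_table, charge_factor=2):
        self.size_table = size_table
        self.charge_factor = charge_factor
        self.values = {}

    def set_value(self, key, data):
        self.values.setdefault(key, deque()).appendleft(data)

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values.values())
            / self.size_table
            * self.charge_factor
        )

t = ChainedSlots(size_table=3)
t.set_value(0, "a")
t.set_value(0, "b")
assert list(t.values[0]) == ["b", "a"]  # newest value sits at the front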
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class snake_case ( unittest.TestCase , _UpperCamelCase):
def a_ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
_A = load_tool("text-classification" )
self.tool.setup()
_A = load_tool("text-classification" , remote=a__ )
def a_ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
_A = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def a_ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
_A = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def a_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
_A = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def a_ ( self : Dict ) -> Any:
'''simple docstring'''
_A = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(a__ , "positive" ) | 621 | 0 |
"""simple docstring"""
from __future__ import annotations
from random import choice
def a__ ( __lowercase ) -> int:
return choice(__lowercase )
def a__ ( __lowercase , __lowercase ) -> int:
_A = random_pivot(__lowercase )
# partition based on pivot
# linear time
_A = [e for e in lst if e < pivot]
_A = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(__lowercase ) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(__lowercase ) < k - 1:
return kth_number(__lowercase , k - len(__lowercase ) - 1 )
# pivot is in elements smaller than k
else:
return kth_number(__lowercase , __lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod() | 711 |
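# The function above is quickselect: partition around a random pivot and
# recurse into the side that must contain the k-th smallest element, giving
# expected-linear runtime on distinct values (duplicates of the pivot are
# dropped). A compact sketch with readable (assumed) names and 1-indexed k:
from random import choice

def kth_number(lst, k):
    pivot = choice(lst)
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    if len(small) == k - 1:
        return pivot
    if len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    return kth_number(small, k)

assert kth_number([2, 1, 3, 4, 5], 3) == 3
assert kth_number([10, 2, 8], 1) == 2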
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase):
__UpperCamelCase = StableDiffusionInpaintPipeline
__UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
__UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__UpperCamelCase = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__UpperCamelCase = frozenset([])
def a_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
_A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a__ , )
_A = PNDMScheduler(skip_prk_steps=a__ )
torch.manual_seed(0 )
_A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
_A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , )
_A = CLIPTextModel(a__ )
_A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_A = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def a_ ( self : Optional[Any] , a__ : List[str] , a__ : Tuple=0 ) -> int:
'''simple docstring'''
_A = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
_A = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A = Image.fromarray(np.uinta(a__ ) ).convert("RGB" ).resize((64, 64) )
_A = Image.fromarray(np.uinta(image + 4 ) ).convert("RGB" ).resize((64, 64) )
if str(a__ ).startswith("mps" ):
_A = torch.manual_seed(a__ )
else:
_A = torch.Generator(device=a__ ).manual_seed(a__ )
_A = {
"prompt": "A painting of a squirrel eating a burger",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def a_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_A = "cpu" # ensure determinism for the device-dependent torch.Generator
_A = self.get_dummy_components()
_A = StableDiffusionInpaintPipeline(**a__ )
_A = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_A = self.get_dummy_inputs(a__ )
_A = sd_pipe(**a__ ).images
_A = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A = np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def a_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase):
def a_ ( self : List[Any] ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
_A = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench.npy" )
_A = "stabilityai/stable-diffusion-2-inpainting"
_A = StableDiffusionInpaintPipeline.from_pretrained(a__ , safety_checker=a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
_A = "Face of a yellow cat, high resolution, sitting on a park bench"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type="np" , )
_A = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def a_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
_A = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench_fp16.npy" )
_A = "stabilityai/stable-diffusion-2-inpainting"
_A = StableDiffusionInpaintPipeline.from_pretrained(
a__ , torch_dtype=torch.floataa , safety_checker=a__ , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
_A = "Face of a yellow cat, high resolution, sitting on a park bench"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type="np" , )
_A = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def a_ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
_A = "stabilityai/stable-diffusion-2-inpainting"
_A = PNDMScheduler.from_pretrained(a__ , subfolder="scheduler" )
_A = StableDiffusionInpaintPipeline.from_pretrained(
a__ , safety_checker=a__ , scheduler=a__ , torch_dtype=torch.floataa , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_A = "Face of a yellow cat, high resolution, sitting on a park bench"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , num_inference_steps=2 , output_type="np" , )
_A = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.6_5 * 10**9 | 621 | 0 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
a_ = logging.get_logger(__name__)
a_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
a_ = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
a_ = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
a_ = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
a_ = {
"facebook/dpr-ctx_encoder-single-nq-base": 5_12,
"facebook/dpr-ctx_encoder-multiset-base": 5_12,
}
a_ = {
"facebook/dpr-question_encoder-single-nq-base": 5_12,
"facebook/dpr-question_encoder-multiset-base": 5_12,
}
a_ = {
"facebook/dpr-reader-single-nq-base": 5_12,
"facebook/dpr-reader-multiset-base": 5_12,
}
a_ = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
a_ = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
a_ = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class snake_case ( _UpperCamelCase):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase = DPRContextEncoderTokenizer
class snake_case ( _UpperCamelCase):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase = DPRQuestionEncoderTokenizer
a_ = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
a_ = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
a_ = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(_UpperCamelCase)
class snake_case :
def __call__( self : Union[str, Any] , a__ : Optional[Any] , a__ : Optional[int] = None , a__ : Any = None , a__ : Optional[Any] = False , a__ : Dict = False , a__ : str = None , a__ : List[Any] = None , a__ : Optional[Any] = None , **a__ : Any , ) -> BatchEncoding:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
__a , padding=__a , truncation=__a , max_length=__a , return_tensors=__a , return_attention_mask=__a , **__a , )
elif titles is None or texts is None:
_A = titles if texts is None else texts
return super().__call__(
__a , __a , padding=__a , truncation=__a , max_length=__a , return_tensors=__a , return_attention_mask=__a , **__a , )
_A = titles if not isinstance(__a , __a ) else [titles]
_A = texts if not isinstance(__a , __a ) else [texts]
_A = len(__a )
_A = questions if not isinstance(__a , __a ) else [questions] * n_passages
assert len(__a ) == len(
__a ), F"""There should be as many titles than texts but got {len(__a )} titles and {len(__a )} texts."""
_A = super().__call__(__a , __a , padding=__a , truncation=__a )["input_ids"]
_A = super().__call__(__a , add_special_tokens=__a , padding=__a , truncation=__a )["input_ids"]
_A = {
"input_ids": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(__a , __a )
]
}
if return_attention_mask is not False:
_A = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_A = attention_mask
return self.pad(__a , padding=__a , max_length=__a , return_tensors=__a )
def a_ ( self : Dict , a__ : Optional[Any] , a__ : Union[str, Any] , a__ : List[Any] = 16 , a__ : List[str] = 64 , a__ : Optional[int] = 4 , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
_A = reader_input["input_ids"]
_A , _A , _A = reader_output[:3]
_A = len(__a )
_A = sorted(range(__a ) , reverse=__a , key=relevance_logits.__getitem__ )
_A = []
for doc_id in sorted_docs:
_A = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_A = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_A = sequence_ids.index(self.pad_token_id )
else:
_A = len(__a )
_A = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__a , top_spans=__a , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__a , start_index=__a , end_index=__a , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(__a ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def a_ ( self : Dict , a__ : List[str] , a__ : Dict , a__ : Dict , a__ : Optional[Any] , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
_A = []
for start_index, start_score in enumerate(__a ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
_A = sorted(__a , key=lambda a__ : x[1] , reverse=__a )
_A = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, F"""Wrong span indices: [{start_index}:{end_index}]"""
_A = end_index - start_index + 1
assert length <= max_answer_length, F"""Span is too long: {length} > {max_answer_length}"""
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(__a ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_UpperCamelCase)
class snake_case ( _UpperCamelCase , _UpperCamelCase):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = READER_PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = READER_PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase = ['input_ids', 'attention_mask']
__UpperCamelCase = DPRReaderTokenizer | 712 |
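# `_get_best_spans` above scores every candidate answer span by
# start_logit + end_logit, sorts descending, and greedily keeps spans that do
# not contain (or sit inside) an already-chosen span. The core selection,
# sketched with plain (assumed) names:
def best_spans(start_logits, end_logits, max_answer_length=10, top_spans=3):
    scores = [
        ((s, s + length), start + end_logits[s + length])
        for s, start in enumerate(start_logits)
        for length in range(min(max_answer_length, len(end_logits) - s))
    ]
    chosen = []
    for (start, end), _ in sorted(scores, key=lambda x: x[1], reverse=True):
        if any(s <= start <= end <= e or start <= s <= e <= end for s, e in chosen):
            continue  # containment overlap with an already-chosen span
        chosen.append((start, end))
        if len(chosen) == top_spans:
            break
    return chosen

assert best_spans([0.1, 2.0, 0.3], [0.2, 0.1, 1.5], top_spans=2) == [(1, 2), (0, 0)]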
"""simple docstring"""
def a__ ( __lowercase , __lowercase ) -> int:
while a != 0:
_A , _A = b % a, a
return b
def a__ ( __lowercase , __lowercase ) -> int:
if gcd(__lowercase , __lowercase ) != 1:
_A = f"""mod inverse of {a!r} and {m!r} does not exist"""
raise ValueError(__lowercase )
_A , _A , _A = 1, 0, a
_A , _A , _A = 0, 1, m
while va != 0:
_A = ua // va
_A , _A , _A , _A , _A , _A = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m | 621 | 0 |
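# The pair above is Euclid's gcd plus an iterative extended-Euclid modular
# inverse; the identifier collapsing in this dump merges the distinct Bezout
# accumulators on the update line, so the standard update is restored here
# under assumed names. The inverse u of a mod m satisfies (a * u) % m == 1
# and exists iff gcd(a, m) == 1.
def gcd(a, b):
    while a != 0:
        a, b = b % a, a
    return b

def mod_inverse(a, m):
    if gcd(a, m) != 1:
        raise ValueError(f"mod inverse of {a!r} and {m!r} does not exist")
    ua, ub, uc = 1, 0, a
    va, vb, vc = 0, 1, m
    while vc != 0:
        q = uc // vc
        ua, ub, uc, va, vb, vc = va, vb, vc, ua - q * va, ub - q * vb, uc - q * vc
    return ua % m

assert (7 * mod_inverse(7, 26)) % 26 == 1  # 7 * 15 == 105 == 4 * 26 + 1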
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case ( lowercase_ , lowercase_ , unittest.TestCase):
__UpperCamelCase = CycleDiffusionPipeline
__UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'negative_prompt',
'height',
'width',
'negative_prompt_embeds',
}
__UpperCamelCase = PipelineTesterMixin.required_optional_params - {'latents'}
__UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'})
__UpperCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
__UpperCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
def a_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
_A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
_A = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , num_train_timesteps=10_00 , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
torch.manual_seed(0 )
_A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
_A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
_A = CLIPTextModel(lowerCamelCase_ )
_A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_A = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def a_ ( self : Union[str, Any] , a__ : str , a__ : Dict=0 ) -> Any:
'''simple docstring'''
_A = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
_A = image / 2 + 0.5
if str(lowerCamelCase_ ).startswith("mps" ):
_A = torch.manual_seed(lowerCamelCase_ )
else:
_A = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
_A = {
"""prompt""": """An astronaut riding an elephant""",
"""source_prompt""": """An astronaut riding a horse""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""eta""": 0.1,
"""strength""": 0.8,
"""guidance_scale""": 3,
"""source_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def a_ ( self : str ) -> List[str]:
'''simple docstring'''
_A = """cpu""" # ensure determinism for the device-dependent torch.Generator
_A = self.get_dummy_components()
_A = CycleDiffusionPipeline(**lowerCamelCase_ )
_A = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_A = self.get_dummy_inputs(lowerCamelCase_ )
_A = pipe(**lowerCamelCase_ )
_A = output.images
_A = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
_A = np.array([0.4_4_5_9, 0.4_9_4_3, 0.4_5_4_4, 0.6_6_4_3, 0.5_4_7_4, 0.4_3_2_7, 0.5_7_0_1, 0.5_9_5_9, 0.5_1_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def a_ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
_A = self.get_dummy_components()
for name, module in components.items():
if hasattr(lowerCamelCase_ , "half" ):
_A = module.half()
_A = CycleDiffusionPipeline(**lowerCamelCase_ )
_A = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_A = self.get_dummy_inputs(lowerCamelCase_ )
_A = pipe(**lowerCamelCase_ )
_A = output.images
_A = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
_A = np.array([0.3_5_0_6, 0.4_5_4_3, 0.4_4_6, 0.4_5_7_5, 0.5_1_9_5, 0.4_1_5_5, 0.5_2_7_3, 0.5_1_8, 0.4_1_1_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def a_ ( self : int ) -> Dict:
'''simple docstring'''
return super().test_save_load_local()
@unittest.skip("non-deterministic pipeline" )
def a_ ( self : List[Any] ) -> Any:
'''simple docstring'''
return super().test_inference_batch_single_identical()
@skip_mps
def a_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def a_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def a_ ( self : Any ) -> int:
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase):
def a_ ( self : str ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/cycle-diffusion/black_colored_car.png" )
_A = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy" )
_A = init_image.resize((5_12, 5_12) )
_A = """CompVis/stable-diffusion-v1-4"""
_A = DDIMScheduler.from_pretrained(lowerCamelCase_ , subfolder="scheduler" )
_A = CycleDiffusionPipeline.from_pretrained(
lowerCamelCase_ , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ , torch_dtype=torch.floataa , revision="fp16" )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing()
_A = """A black colored car"""
_A = """A blue colored car"""
_A = torch.manual_seed(0 )
_A = pipe(
prompt=lowerCamelCase_ , source_prompt=lowerCamelCase_ , image=lowerCamelCase_ , num_inference_steps=1_00 , eta=0.1 , strength=0.8_5 , guidance_scale=3 , source_guidance_scale=1 , generator=lowerCamelCase_ , output_type="np" , )
_A = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def a_ ( self : List[str] ) -> int:
'''simple docstring'''
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/cycle-diffusion/black_colored_car.png" )
_A = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy" )
_A = init_image.resize((5_12, 5_12) )
_A = """CompVis/stable-diffusion-v1-4"""
_A = DDIMScheduler.from_pretrained(lowerCamelCase_ , subfolder="scheduler" )
_A = CycleDiffusionPipeline.from_pretrained(lowerCamelCase_ , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing()
_A = """A black colored car"""
_A = """A blue colored car"""
_A = torch.manual_seed(0 )
_A = pipe(
prompt=lowerCamelCase_ , source_prompt=lowerCamelCase_ , image=lowerCamelCase_ , num_inference_steps=1_00 , eta=0.1 , strength=0.8_5 , guidance_scale=3 , source_guidance_scale=1 , generator=lowerCamelCase_ , output_type="np" , )
_A = output.images
assert np.abs(image - expected_image ).max() < 2E-2 | 713 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__( self : List[Any] , data : Any ) -> Any:
        '''simple docstring'''
        self.data = data
    def __iter__( self : List[str] ) -> str:
        '''simple docstring'''
        for element in self.data:
            yield element
def create_accelerator( even_batches=True ) -> Tuple:
    accelerator = Accelerator(even_batches=even_batches )
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator
def create_dataloader( accelerator , dataset_size , batch_size , iterable = False ) -> Union[str, Any]:
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size ) ) )
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size ) ) )
    dl = DataLoader(dataset , batch_size=batch_size )
    dl = accelerator.prepare(dl )
    return dl
def verify_dataloader_batch_sizes( accelerator , dataset_size , batch_size , process_0_expected_batch_sizes , process_1_expected_batch_sizes , ) -> Dict:
    dl = create_dataloader(accelerator=accelerator , dataset_size=dataset_size , batch_size=batch_size )
    batch_sizes = [len(batch[0] ) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def test_default_ensures_even_batch_sizes( ) -> List[str]:
    accelerator = create_accelerator()
    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def test_can_disable_even_batches( ) -> List[Any]:
    accelerator = create_accelerator(even_batches=False )
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def test_can_join_uneven_inputs( ) -> int:
    accelerator = create_accelerator(even_batches=False )
    model = torch.nn.Linear(1 , 1 )
    ddp_model = accelerator.prepare(model )
    dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1 )
    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model] ):
        for batch_idx, batch in enumerate(dl ):
            output = ddp_model(batch[0].float() )
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx )
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
def test_join_raises_warning_for_non_ddp_distributed( accelerator ) -> List[str]:
    with warnings.catch_warnings(record=True ) as w:
        with accelerator.join_uneven_inputs([Mock()] ):
            pass
        assert issubclass(w[-1].category , UserWarning )
        assert "only supported for multi-GPU" in str(w[-1].message )
def test_join_can_override_even_batches( ) -> Tuple:
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches )
    model = torch.nn.Linear(1 , 1 )
    ddp_model = accelerator.prepare(model )
    train_dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1 )
    valid_dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1 )
    with accelerator.join_uneven_inputs([ddp_model] , even_batches=overridden_even_batches ):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches
    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def test_join_can_override_for_mixed_type_dataloaders( ) -> int:
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches )
    model = torch.nn.Linear(1 , 1 )
    ddp_model = accelerator.prepare(model )
    create_dataloader(accelerator , dataset_size=3 , batch_size=1 , iterable=True )
    batch_dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1 )
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore" )
        try:
            with accelerator.join_uneven_inputs([ddp_model] , even_batches=overridden_even_batches ):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError
    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def test_join_raises_warning_for_iterable_when_overriding_even_batches( ) -> Optional[Any]:
    accelerator = create_accelerator()
    model = torch.nn.Linear(1 , 1 )
    ddp_model = accelerator.prepare(model )
    create_dataloader(accelerator , dataset_size=3 , batch_size=1 , iterable=True )
    with warnings.catch_warnings(record=True ) as w:
        with accelerator.join_uneven_inputs([ddp_model] , even_batches=False ):
            pass
        assert issubclass(w[-1].category , UserWarning )
        assert "only supported for map-style datasets" in str(w[-1].message )
def main( ) -> Optional[Any]:
    accelerator = create_accelerator()
    accelerator.print("Test that even_batches variable ensures uniform batches across processes" )
    test_default_ensures_even_batch_sizes()
    accelerator.print("Run tests with even_batches disabled" )
    test_can_disable_even_batches()
    accelerator.print("Test joining uneven inputs" )
    test_can_join_uneven_inputs()
    accelerator.print("Test overriding even_batches when joining uneven inputs" )
    test_join_can_override_even_batches()
    accelerator.print("Test overriding even_batches for mixed dataloader types" )
    test_join_can_override_for_mixed_type_dataloaders()
    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" )
    test_join_raises_warning_for_iterable_when_overriding_even_batches()
    accelerator.print("Test join with non DDP distributed raises warning" )
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator )
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
    main() | 621 | 0 |
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs( token , num_runs=7 ) -> str:
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"""Bearer {token}"""}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = f"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""
    result = requests.get(url , headers=headers ).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs( token ) -> List[Any]:
    workflow_runs = get_daily_ci_runs(token )
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id
def get_last_daily_ci_artifacts( artifact_names , output_dir , token ) -> Tuple:
    workflow_run_id = get_last_daily_ci_runs(token )
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id , token=token )
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name , artifact_url=artifact_url , output_dir=output_dir , token=token )
def get_last_daily_ci_reports( artifact_names , output_dir , token ) -> Optional[Any]:
    get_last_daily_ci_artifacts(artifact_names , output_dir , token )
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir , f"""{artifact_name}.zip""" )
        if os.path.isfile(artifact_zip_path ):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        with z.open(filename ) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8" )
    return results | 714 |
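# A minimal driver sketch for the helpers above. The GITHUB_TOKEN environment
# variable and the artifact name are illustrative assumptions, not values taken
# from the CI configuration.
if __name__ == "__main__":
    token = os.environ.get("GITHUB_TOKEN" )  # assumed: a token with `actions:read` scope
    artifact_names = ["ci_results"]  # hypothetical artifact name
    reports = get_last_daily_ci_reports(artifact_names , output_dir="ci_artifacts" , token=token )
    for name, files in reports.items():
        print(name , sorted(files )[:5] )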
"""simple docstring"""
class FlowNetwork:
    def __init__( self : Optional[int] , graph : List[Any] , sources : List[str] , sinks : Tuple ) -> Optional[Any]:
        '''simple docstring'''
        self.source_index = None
        self.sink_index = None
        self.graph = graph
        self._normalize_graph(sources , sinks )
        self.vertices_count = len(graph )
        self.maximum_flow_algorithm = None
    def _normalize_graph( self : str , sources : List[str] , sinks : List[Any] ) -> Dict:
        '''simple docstring'''
        if isinstance(sources , int ):
            sources = [sources]
        if isinstance(sinks , int ):
            sinks = [sinks]
        if len(sources ) == 0 or len(sinks ) == 0:
            return
        self.source_index = sources[0]
        self.sink_index = sinks[0]
        # make fake vertex if there are more
        # than one source or sink
        if len(sources ) > 1 or len(sinks ) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i] )
            size = len(self.graph ) + 1
            for room in self.graph:
                room.insert(0 , 0 )
            self.graph.insert(0 , [0] * size )
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0
            size = len(self.graph ) + 1
            for room in self.graph:
                room.append(0 )
            self.graph.append([0] * size )
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1
    def find_maximum_flow( self : Optional[int] ) -> List[Any]:
        '''simple docstring'''
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before." )
        if self.source_index is None or self.sink_index is None:
            return 0
        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()
    def set_maximum_flow_algorithm( self : List[Any] , algorithm : Optional[Any] ) -> str:
        '''simple docstring'''
        self.maximum_flow_algorithm = algorithm(self )
class FlowNetworkAlgorithmExecutor:
    def __init__( self : List[str] , flow_network : List[str] ) -> Union[str, Any]:
        '''simple docstring'''
        self.flow_network = flow_network
        self.verticies_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False
    def execute( self : Optional[Any] ) -> List[Any]:
        '''simple docstring'''
        if not self.executed:
            self._algorithm()
            self.executed = True
    def _algorithm( self : Any ) -> int:
        '''simple docstring'''
        pass
class MaximumFlowAlgorithmExecutor ( FlowNetworkAlgorithmExecutor):
    def __init__( self : Optional[Any] , flow_network : Dict ) -> List[str]:
        '''simple docstring'''
        super().__init__(flow_network )
        # use this to save your result
        self.maximum_flow = -1
    def getMaximumFlow( self : Any ) -> List[str]:
        '''simple docstring'''
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!" )
        return self.maximum_flow
class PushRelabelExecutor ( MaximumFlowAlgorithmExecutor):
    def __init__( self : Union[str, Any] , flow_network : Union[str, Any] ) -> Dict:
        '''simple docstring'''
        super().__init__(flow_network )
        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count )]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count
    def _algorithm( self : Any ) -> Dict:
        '''simple docstring'''
        self.heights[self.source_index] = self.verticies_count
        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth
        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count )
            if i != self.source_index and i != self.sink_index
        ]
        # move through list
        i = 0
        while i < len(vertices_list ):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index )
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0 , vertices_list.pop(i ) )
                i = 0
            else:
                i += 1
        self.maximum_flow = sum(self.preflow[self.source_index] )
    def process_vertex( self : Dict , vertex_index : Any ) -> Optional[int]:
        '''simple docstring'''
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count ):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index , neighbour_index )
            self.relabel(vertex_index )
    def push( self : str , from_index : Optional[int] , to_index : List[Any] ) -> Optional[int]:
        '''simple docstring'''
        preflow_delta = min(
            self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta
    def relabel( self : Any , vertex_index : Dict ) -> Any:
        '''simple docstring'''
        min_height = None
        for to_index in range(self.verticies_count ):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]
        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f'''maximum flow is {maximum_flow}''') | 621 | 0 |
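# Illustrative sanity check for the push-relabel implementation above (not part
# of the original module): a 3-vertex chain 0 -> 1 -> 2 with capacities 4 and 2
# must be capped by its smallest edge, so the maximum flow is 2.
chain_graph = [
    [0, 4, 0],
    [0, 0, 2],
    [0, 0, 0],
]
chain_network = FlowNetwork(chain_graph, [0], [2])
chain_network.set_maximum_flow_algorithm(PushRelabelExecutor)
assert chain_network.find_maximum_flow() == 2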
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main( ) -> int:
    parser = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=False )
    subparsers = parser.add_subparsers(help="accelerate command helpers" )
    # Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , "func" ):
        parser.print_help()
        exit(1 )
    # Run
    args.func(args )
if __name__ == "__main__":
main() | 715 |
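# The CLI above dispatches through args.func(args): each *_command_parser
# attaches its handler via set_defaults. A stripped-down, self-contained sketch
# of the same argparse pattern (toy names, not from the source):
from argparse import ArgumentParser as _DemoParser

def _hello_command(args):
    print(f"hello {args.name}")

_demo = _DemoParser("demo")
_demo_sub = _demo.add_subparsers()
_hello = _demo_sub.add_parser("hello")
_hello.add_argument("--name", default="world")
_hello.set_defaults(func=_hello_command)  # attach the handler as a parser default
_demo_args = _demo.parse_args(["hello", "--name", "accelerate"])
_demo_args.func(_demo_args)  # dispatch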
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 621 | 0 |
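# The lazy structure above defers importing the torch/tf/flax backends until an
# attribute is first accessed. A simplified stand-in for the same idea using a
# module-level __getattr__ (PEP 562); `json` is just a placeholder target, not
# something the original module imports.
import importlib

_lazy_targets = {"heavy_dep": "json"}

def __getattr__(name):
    if name in _lazy_targets:
        return importlib.import_module(_lazy_targets[name] )
    raise AttributeError(name )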
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory :
    def __init__( self : Union[str, Any] ) -> List[Any]:
        '''simple docstring'''
        self.process = psutil.Process()
        self.peak_monitoring = False
    def peak_monitor( self : Tuple ) -> Union[str, Any]:
        '''simple docstring'''
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss , self.cpu_memory_peak )
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break
    def start( self : int ) -> Dict:
        '''simple docstring'''
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor )
        self.thread.daemon = True
        self.thread.start()
    def stop( self : Dict ) -> Union[str, Any]:
        '''simple docstring'''
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
cpu_peak_tracker = PeakCPUMemory()
def start_measure( ) -> str:
    measures = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i )] = torch.cuda.memory_allocated(i )
    torch.cuda.reset_peak_memory_stats()
    return measures
def end_measure( start_measures ) -> Union[str, Any]:
    measures = {"time": time.time() - start_measures["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i )] = (torch.cuda.memory_allocated(i ) - start_measures[str(i )]) / 2**20
        measures[f"""{i}-peak"""] = (torch.cuda.max_memory_allocated(i ) - start_measures[str(i )]) / 2**20
    return measures
def log_measures( measures , description ) -> int:
    print(f"""{description}:""" )
    print(f"""- Time: {measures['time']:.2f}s""" )
    for i in range(torch.cuda.device_count() ):
        print(f"""- GPU {i} allocated: {measures[str(i )]:.2f}MiB""" )
        peak = measures[f"""{i}-peak"""]
        print(f"""- GPU {i} peak: {peak:.2f}MiB""" )
    print(f"""- CPU RAM allocated: {measures['cpu']:.2f}MiB""" )
    print(f"""- CPU RAM peak: {measures['cpu-peak']:.2f}MiB""" ) | 716 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)
class GLPNFeatureExtractor ( GLPNImageProcessor):
    def __init__( self : str , *args : Dict , **kwargs : Optional[int] ) -> None:
        '''simple docstring'''
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs ) | 621 | 0 |
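# Since the class above only warns and then delegates, migrating is a one-line
# change. "vinvino02/glpn-kitti" is an illustrative checkpoint name, not taken
# from the source.
from transformers import GLPNImageProcessor as _GLPNImageProcessor

image_processor = _GLPNImageProcessor.from_pretrained("vinvino02/glpn-kitti" )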
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig ( PretrainedConfig):
    model_type = '''speech_to_text'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__( self : Tuple , vocab_size : Union[str, Any]=1_00_00 , encoder_layers : Tuple=12 , encoder_ffn_dim : Tuple=20_48 , encoder_attention_heads : List[str]=4 , decoder_layers : List[Any]=6 , decoder_ffn_dim : List[str]=20_48 , decoder_attention_heads : List[str]=4 , encoder_layerdrop : Dict=0.0 , decoder_layerdrop : Any=0.0 , use_cache : List[Any]=True , is_encoder_decoder : Any=True , activation_function : List[Any]="relu" , d_model : Dict=2_56 , dropout : int=0.1 , attention_dropout : Dict=0.0 , activation_dropout : Any=0.0 , init_std : Dict=0.0_2 , decoder_start_token_id : int=2 , scale_embedding : Union[str, Any]=True , pad_token_id : int=1 , bos_token_id : Union[str, Any]=0 , eos_token_id : str=2 , max_source_positions : Optional[int]=60_00 , max_target_positions : List[Any]=10_24 , num_conv_layers : str=2 , conv_kernel_sizes : Optional[int]=(5, 5) , conv_channels : str=10_24 , input_feat_per_channel : Optional[int]=80 , input_channels : str=1 , **kwargs : List[str] , ) -> Tuple:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes )
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        if len(self.conv_kernel_sizes ) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                F"""but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, """
                F"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , **kwargs , ) | 717 |
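# A short instantiation sketch for the configuration above: build it with
# defaults, then override a couple of fields (the overrides are illustrative).
config = Speech2TextConfig()
assert config.d_model == 2_56 and config.num_conv_layers == 2
small_config = Speech2TextConfig(encoder_layers=6 , encoder_ffn_dim=10_24 )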
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_( state_dict ) -> Optional[int]:
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb( emb ) -> List[Any]:
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk( checkpoint_path , hf_config_path="facebook/mbart-large-en-ro" , finetuned=False , mbart_50=False ) -> List[str]:
    state_dict = torch.load(checkpoint_path , map_location="cpu" )["model"]
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path , vocab_size=vocab_size )
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config )
    model.model.load_state_dict(state_dict )
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path) | 621 | 0 |
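# Invocation sketch (shell) for the script above; both paths are placeholders
# and the script filename is assumed:
#   python convert_mbart_original_checkpoint_to_pytorch.py \
#       /path/to/fairseq/model.pt ./mbart-converted \
#       --hf_config facebook/mbart-large-cc25 --finetuned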
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester :
def __init__( self : Optional[Any] , a__ : str , a__ : int=13 , a__ : Dict=30 , a__ : int=2 , a__ : Optional[Any]=3 , a__ : Dict=True , a__ : Any=True , a__ : Union[str, Any]=32 , a__ : List[str]=5 , a__ : Optional[int]=4 , a__ : List[Any]=37 , a__ : List[Any]="gelu" , a__ : str=0.1 , a__ : Optional[int]=0.1 , a__ : List[Any]=10 , a__ : List[str]=0.0_2 , a__ : str=3 , a__ : Optional[Any]=0.6 , a__ : Dict=None , ) -> int:
'''simple docstring'''
_A = parent
_A = batch_size
_A = image_size
_A = patch_size
_A = num_channels
_A = is_training
_A = use_labels
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = type_sequence_label_size
_A = initializer_range
_A = mask_ratio
_A = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_A = (image_size // patch_size) ** 2
_A = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def a_ ( self : Tuple ) -> Dict:
'''simple docstring'''
_A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = self.get_config()
return config, pixel_values, labels
def a_ ( self : Dict ) -> Dict:
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def a_ ( self : Any , a__ : Any , a__ : Union[str, Any] , a__ : Any ) -> Tuple:
'''simple docstring'''
_A = ViTMAEModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
_A = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a_ ( self : List[str] , a__ : List[Any] , a__ : List[Any] , a__ : Tuple ) -> Union[str, Any]:
'''simple docstring'''
_A = ViTMAEForPreTraining(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
_A = model(UpperCAmelCase__ )
_A = (self.image_size // self.patch_size) ** 2
_A = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_A = 1
_A = ViTMAEForPreTraining(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
_A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_A = model(UpperCAmelCase__ )
_A = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def a_ ( self : Optional[int] ) -> str:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
def a_ ( self : Tuple ) -> Tuple:
'''simple docstring'''
_A = ViTMAEModelTester(self )
_A = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37 )
def a_ ( self : Tuple ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def a_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
pass
def a_ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(UpperCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase__ , nn.Linear ) )
def a_ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(UpperCAmelCase__ )
_A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def a_ ( self : Dict ) -> int:
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def a_ ( self : List[Any] ) -> Dict:
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase__ )
def a_ ( self : List[str] , a__ : Optional[Any] , a__ : Dict , a__ : Tuple ) -> Optional[int]:
'''simple docstring'''
np.random.seed(2 )
_A = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
_A = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_A = torch.from_numpy(UpperCAmelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_A = pt_noise
super().check_pt_tf_models(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def a_ ( self : Tuple ) -> Dict:
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_A = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
_A = outputs[0].cpu().numpy()
_A = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase__ )
_A = model_class.from_pretrained(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_A = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
# Make sure we don't have nans
_A = after_outputs[0].cpu().numpy()
_A = 0
_A = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCAmelCase__ , 1E-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def a_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def a_ ( self : str ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def a_ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def a_ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def a_ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
pass
@slow
def a_ ( self : Optional[int] ) -> int:
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = ViTMAEModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
def prepare_img( ) -> List[Any]:
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class snake_case ( unittest.TestCase):
@cached_property
def a_ ( self : List[str] ) -> Dict:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def a_ ( self : List[Any] ) -> int:
'''simple docstring'''
np.random.seed(2 )
_A = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(UpperCAmelCase__ )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=UpperCAmelCase__ , return_tensors="pt" ).to(UpperCAmelCase__ )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_A = ViTMAEConfig()
_A = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_A = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
_A = model(**UpperCAmelCase__ , noise=torch.from_numpy(UpperCAmelCase__ ).to(device=UpperCAmelCase__ ) )
# verify the logits
_A = torch.Size((1, 1_96, 7_68) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase__ )
_A = torch.tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(UpperCAmelCase__ ) , atol=1E-4 ) ) | 718 |
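# Unpacking the tester's sequence-length formula used above: with image_size 30
# and patch_size 2 there are (30 // 2) ** 2 = 225 patches plus one [CLS] token,
# and mask_ratio 0.6 keeps ceil(0.4 * 226) = 91 positions.
import math

image_size, patch_size, mask_ratio = 30, 2, 0.6
num_patches = (image_size // patch_size) ** 2
seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
assert (num_patches, seq_length) == (2_25 , 91 )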
"""simple docstring"""
import numpy as np
def exponential_linear_unit( vector , alpha ) -> np.ndarray:
    return np.where(vector > 0 , vector , (alpha * (np.exp(vector ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 621 | 0 |
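# Quick numeric check of the ELU above: positives pass through unchanged while
# negatives are squashed toward -alpha, e.g. 0.3 * (exp(-2.0) - 1) is roughly -0.2594.
vec = np.array([2.3, 0.6, -2.0, -3.8] )
print(exponential_linear_unit(vec , 0.3 ) )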
"""simple docstring"""
def hamming( n_element ) -> list:
    n_element = int(n_element )
    if n_element < 1:
        my_error = ValueError("a should be a positive number" )
        raise my_error
    hamming_list = [1]
    (i, j, k) = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
        index += 1
    return hamming_list
if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f'''The list with nth numbers is: {hamming_numbers}''')
    print("-----------------------------------------------------") | 719 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    }
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 5_12,
    "t5-base": 5_12,
    "t5-large": 5_12,
    "t5-3b": 5_12,
    "t5-11b": 5_12,
}
SPIECE_UNDERLINE = "▁"
class T5Tokenizer ( PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self : List[str] , a__ : Optional[int] , a__ : Union[str, Any]="</s>" , a__ : Union[str, Any]="<unk>" , a__ : str="<pad>" , a__ : Optional[int]=1_00 , a__ : List[Any]=None , a__ : Optional[Dict[str, Any]] = None , a__ : Any=True , **a__ : Optional[int] , ) -> None:
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
_A = [F"""<extra_id_{i}>""" for i in range(a__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_A = len(set(filter(lambda a__ : bool("extra_id" in str(a__ ) ) , a__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
if legacy:
logger.warning_once(
F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"""
" read the related pull request available at https://github.com/huggingface/transformers/pull/24565" )
_A = legacy
_A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=a__ , unk_token=a__ , pad_token=a__ , extra_ids=a__ , additional_special_tokens=a__ , sp_model_kwargs=self.sp_model_kwargs , legacy=a__ , **a__ , )
_A = vocab_file
_A = extra_ids
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a__ )
@staticmethod
def a_ ( a__ : List[str] , a__ : Optional[int] , a__ : Tuple ) -> Tuple:
'''simple docstring'''
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
F""" {pretrained_model_name_or_path} automatically truncating your input to"""
F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , a__ , )
return max_model_length
@property
def a_ ( self : List[Any] ) -> Dict:
'''simple docstring'''
return self.sp_model.get_piece_size() + self._extra_ids
def a_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
_A = {self.convert_ids_to_tokens(a__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a_ ( self : Optional[Any] , a__ : List[int] , a__ : Optional[List[int]] = None , a__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(a__ )) + [1]
return ([0] * len(a__ )) + [1] + ([0] * len(a__ )) + [1]
def a_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
return list(
set(filter(lambda a__ : bool(re.search(r"<extra_id_\d+>" , a__ ) ) is not None , self.additional_special_tokens ) ) )
def a_ ( self : str ) -> List[Any]:
'''simple docstring'''
return [self._convert_token_to_id(a__ ) for token in self.get_sentinel_tokens()]
def a_ ( self : List[Any] , a__ : List[int] ) -> List[int]:
'''simple docstring'''
if len(a__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
" eos tokens being added." )
return token_ids
else:
return token_ids + [self.eos_token_id]
def a_ ( self : int , a__ : List[int] , a__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_A = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def a_ ( self : Union[str, Any] , a__ : List[int] , a__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_A = self._add_eos_if_not_present(a__ )
if token_ids_a is None:
return token_ids_a
else:
_A = self._add_eos_if_not_present(a__ )
return token_ids_a + token_ids_a
def __getstate__( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
_A = self.__dict__.copy()
_A = None
return state
def __setstate__( self : int , a__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
_A = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_A = {}
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a_ ( self : int , a__ : "TextInput" , **a__ : List[str] ) -> List[str]:
'''simple docstring'''
if not self.legacy:
_A = SPIECE_UNDERLINE + text.replace(a__ , " " )
return super().tokenize(a__ , **a__ )
def a_ ( self : str , a__ : Dict , **a__ : Optional[int] ) -> Any:
'''simple docstring'''
if not self.legacy:
_A = text.startswith(a__ )
if is_first:
_A = text[1:]
_A = self.sp_model.encode(a__ , out_type=a__ )
if not self.legacy and not is_first and not text.startswith(" " ) and tokens[0].startswith(a__ ):
_A = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def a_ ( self : int , a__ : List[Any] ) -> List[str]:
'''simple docstring'''
if token.startswith("<extra_id_" ):
_A = re.match(r"<extra_id_(\d+)>" , a__ )
_A = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(a__ )
def a_ ( self : Dict , a__ : Union[str, Any] ) -> Any:
'''simple docstring'''
if index < self.sp_model.get_piece_size():
_A = self.sp_model.IdToPiece(a__ )
else:
_A = F"""<extra_id_{self.vocab_size - 1 - index}>"""
return token
def a_ ( self : Optional[int] , a__ : Tuple ) -> List[str]:
'''simple docstring'''
_A = []
_A = ""
_A = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a__ ) + token
_A = True
_A = []
else:
current_sub_tokens.append(a__ )
_A = False
out_string += self.sp_model.decode(a__ )
return out_string.strip()
def a_ ( self : Dict , a__ : str , a__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(a__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_A = os.path.join(
a__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a__ )
elif not os.path.isfile(self.vocab_file ):
with open(a__ , "wb" ) as fi:
_A = self.sp_model.serialized_model_proto()
fi.write(a__ )
return (out_vocab_file,) | 621 | 0 |
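# Sketch of the sentinel mapping implemented above (the vocab_size - num - 1
# rule): extra_id tokens occupy the top of the vocabulary, so <extra_id_0> maps
# to vocab_size - 1 and counts downwards. Illustrative only; assumes the stock
# transformers T5Tokenizer API and the standard t5-small sentencepiece model.
tokenizer = T5Tokenizer.from_pretrained("t5-small" )
assert tokenizer.convert_tokens_to_ids("<extra_id_0>" ) == tokenizer.vocab_size - 1
assert tokenizer.convert_ids_to_tokens(tokenizer.vocab_size - 1 ) == "<extra_id_0>"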
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester ( unittest.TestCase):
def __init__( self : List[Any] , a__ : Dict , a__ : List[Any]=3 , a__ : Union[str, Any]=32 , a__ : Optional[int]=3 , a__ : int=10 , a__ : List[str]=[10, 20, 30, 40] , a__ : List[Any]=[1, 1, 2, 1] , a__ : str=True , a__ : Tuple=True , a__ : str="relu" , a__ : Tuple=3 , a__ : int=None , ) -> Dict:
'''simple docstring'''
_A = parent
_A = batch_size
_A = image_size
_A = num_channels
_A = embeddings_size
_A = hidden_sizes
_A = depths
_A = is_training
_A = use_labels
_A = hidden_act
_A = num_labels
_A = scope
_A = len(SCREAMING_SNAKE_CASE_ )
def a_ ( self : List[Any] ) -> str:
'''simple docstring'''
_A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A = self.get_config()
return config, pixel_values
def a_ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def a_ ( self : str , a__ : Any , a__ : Optional[int] ) -> Dict:
'''simple docstring'''
_A = FlaxRegNetModel(config=SCREAMING_SNAKE_CASE_ )
_A = model(SCREAMING_SNAKE_CASE_ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a_ ( self : Union[str, Any] , a__ : str , a__ : str ) -> Dict:
'''simple docstring'''
_A = self.num_labels
_A = FlaxRegNetForImageClassification(config=SCREAMING_SNAKE_CASE_ )
_A = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a_ ( self : Any ) -> Any:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest ( FlaxModelTesterMixin , unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False
def a_ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
_A = FlaxRegNetModelTester(self )
_A = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ )
def a_ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a_ ( self : int ) -> Optional[Any]:
'''simple docstring'''
return
def a_ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def a_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def a_ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def a_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
pass
def a_ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(SCREAMING_SNAKE_CASE_ )
_A = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def a_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
def check_hidden_states_output(a__ : Any , a__ : Any , a__ : int ):
_A = model_class(SCREAMING_SNAKE_CASE_ )
_A = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
_A = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_A = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , expected_num_stages + 1 )
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a_ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_A = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_A = model_class(SCREAMING_SNAKE_CASE_ )
@jax.jit
def model_jitted(a__ : List[Any] , **a__ : Optional[Any] ):
return model(pixel_values=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
with self.subTest("JIT Enabled" ):
_A = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
_A = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
for jitted_output, output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertEqual(jitted_output.shape , output.shape )
def prepare_img( ) -> Tuple:
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_flax
class snake_case ( unittest.TestCase):
@cached_property
def a_ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
return AutoImageProcessor.from_pretrained("facebook/regnet-y-040" ) if is_vision_available() else None
@slow
def a_ ( self : List[str] ) -> Tuple:
'''simple docstring'''
_A = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040" )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="np" )
_A = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
_A = (1, 10_00)
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
_A = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) ) | 720 |
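# Standalone miniature of the JIT-consistency check performed above: outputs
# must agree in shape with and without compilation (toy function, not from the
# source).
import jax
import jax.numpy as jnp

@jax.jit
def _double_sum(x ):
    return (x * 2).sum(axis=-1 )

_x = jnp.ones((2, 3) )
with jax.disable_jit():
    _eager = _double_sum(_x )
_jitted = _double_sum(_x )
assert _eager.shape == _jitted.shape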
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt( args ) -> List[Any]:
    parameter_file = os.path.join(args.tf_model_dir , "parameters.json" )
    params = json.loads(open(parameter_file ).read() )
    if not params:
        raise ValueError(
            f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
    if not args.output.endswith(".pt" ):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0" ):
        reader = tf.train.load_checkpoint(args.tf_model_dir )
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name ).astype(np.float16 )
            if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ):
                continue
            if key_name.startswith("pasts/" ):
                if key_name.startswith("pasts/mlp" ):
                    player = int(key_name[9] )
                elif key_name.startswith("pasts/out" ):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # fed into an nn.Sequential with Tanh, so 2 at a time
                state = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state )
            elif key_name.startswith("model/moe" ):
                player = int(key_name[9:].split("/" )[0] )
                if key_name.endswith("/switch_gating/kernel" ):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith("/softmlp/kernel" ):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
                    nlayer = key_name[-9:-7]
                    for i in range(16 ):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0] ).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state )
            elif key_name.startswith("model/mlp" ):
                player = int(key_name[9:].split("/" )[0] )
                if key_name.endswith("/p1/kernel" ):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith("/p1/bias" ):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith("/p2/kernel" ):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith("/p2/bias" ):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
            elif key_name.startswith("model/ln" ):
                player = int(key_name[8:].split("/" )[0] )
                if key_name.endswith("/b" ):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith("/g" ):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
            elif key_name.startswith("model/att" ):
                player = int(key_name[9:].split("/" )[0] )
                if key_name.endswith("/qkv/kernel" ):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
                        .transpose([1, 0] )
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
                        .transpose([1, 0] )
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
                        .transpose([1, 0] )
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q )
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k )
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v )
                elif key_name.endswith("/o/kernel" ):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state )
            elif key_name.startswith("model/an" ):
                player = int(key_name[8:].split("/" )[0] )
                if key_name.endswith("/b" ):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
                elif key_name.endswith("/g" ):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state )
            elif (
                key_name.startswith("model/wte" )
                or key_name.startswith("model/wpe" )
                or key_name.startswith("model/ete" )
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state )
                if key_name.startswith("model/wte" ):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state )
            elif key_name.startswith("model/wob" ):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1) )
                new_state[name] = torch.tensor(state )
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0] ).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state )
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state )
    torch.save(new_state , args.output )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args) | 621 | 0 |
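# Invocation sketch (shell) for the converter above; both paths are
# placeholders and the script filename is assumed:
#   python convert_gptsan_tf_checkpoint_to_pytorch.py \
#       --tf_model_dir ./gptsan-tf-checkpoint --output ./gptsan.pt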
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_convnext""": ["""CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvNextConfig""", """ConvNextOnnxConfig"""]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_convnext"] = ["""ConvNextFeatureExtractor"""]
    _import_structure["image_processing_convnext"] = ["""ConvNextImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
"""CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvNextForImageClassification""",
"""ConvNextModel""",
"""ConvNextPreTrainedModel""",
"""ConvNextBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
"""TFConvNextForImageClassification""",
"""TFConvNextModel""",
"""TFConvNextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
a_ = _LazyModule(__name__, globals()["__file__"], _import_structure) | 721 |
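# The `_LazyModule` used above defers the heavy framework imports (torch, tensorflow)
# until an attribute is first accessed. A stripped-down sketch of the idea using only
# the standard library -- not the real transformers implementation:
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is only hit once per attribute
        return value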
"""simple docstring"""
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
a_ = argparse.ArgumentParser(
description=(
"Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
" Distillation"
)
)
parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
parser.add_argument("--model_name", default="roberta-large", type=str)
parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
parser.add_argument("--vocab_transform", action="store_true")
a_ = parser.parse_args()
if args.model_type == "roberta":
a_ = RobertaForMaskedLM.from_pretrained(args.model_name)
a_ = "roberta"
elif args.model_type == "gpt2":
a_ = GPTaLMHeadModel.from_pretrained(args.model_name)
a_ = "transformer"
a_ = model.state_dict()
a_ = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
a_ = state_dict[f'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
a_ = f'''{prefix}.embeddings.{w}.weight'''
a_ = state_dict[param_name]
for w in ["weight", "bias"]:
a_ = f'''{prefix}.embeddings.LayerNorm.{w}'''
a_ = state_dict[param_name]
# Transformer Blocks #
a_ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
a_ = state_dict[
f'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
]
a_ = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
a_ = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
]
std_idx += 1
# Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
a_ = state_dict[f'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
a_ = state_dict[f'''lm_head.dense.{w}''']
a_ = state_dict[f'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
a_ = state_dict[f'''{prefix}.ln_f.{w}''']
a_ = state_dict["lm_head.weight"]
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint) | 621 | 0 |
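# The script above initializes a 6-layer student from teacher layers [0, 2, 4, 7, 9, 11].
# A tiny self-contained demo of the same renaming scheme on a fake state dict makes the
# teacher-layer -> student-layer mapping explicit (parameter names here are illustrative):
teacher_sd = {f"roberta.encoder.layer.{i}.output.dense.weight": i for i in range(12)}
student_sd = {}
std_idx = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
    student_sd[f"roberta.encoder.layer.{std_idx}.output.dense.weight"] = teacher_sd[
        f"roberta.encoder.layer.{teacher_idx}.output.dense.weight"
    ]
    std_idx += 1
assert [student_sd[f"roberta.encoder.layer.{i}.output.dense.weight"] for i in range(6)] == [0, 2, 4, 7, 9, 11]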
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {"vocab_file": "spiece.model"}
a_ = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
}
}
a_ = {
"albert-base-v1": 5_12,
"albert-large-v1": 5_12,
"albert-xlarge-v1": 5_12,
"albert-xxlarge-v1": 5_12,
"albert-base-v2": 5_12,
"albert-large-v2": 5_12,
"albert-xlarge-v2": 5_12,
"albert-xxlarge-v2": 5_12,
}
a_ = "▁"
class snake_case ( lowercase__):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : int , a__ : List[str] , a__ : str=True , a__ : Union[str, Any]=True , a__ : int=False , a__ : Tuple="[CLS]" , a__ : Any="[SEP]" , a__ : Union[str, Any]="<unk>" , a__ : Union[str, Any]="[SEP]" , a__ : Optional[Any]="<pad>" , a__ : str="[CLS]" , a__ : Dict="[MASK]" , a__ : Optional[Dict[str, Any]] = None , **a__ : Optional[int] , ) -> str:
'''simple docstring'''
_A = (
AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ , normalized=UpperCAmelCase__ )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
else mask_token
)
_A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCAmelCase__ , remove_space=UpperCAmelCase__ , keep_accents=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , )
_A = do_lower_case
_A = remove_space
_A = keep_accents
_A = vocab_file
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCAmelCase__ )
@property
def a_ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
return len(self.sp_model )
def a_ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
_A = {self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
_A = self.__dict__.copy()
_A = None
return state
def __setstate__( self : Tuple , a__ : Optional[Any] ) -> List[Any]:
'''simple docstring'''
_A = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_A = {}
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def preprocess_text( self : Any , inputs : str ) -> str:
'''simple docstring'''
if self.remove_space:
outputs = ''' '''.join(inputs.strip().split() )
else:
outputs = inputs
outputs = outputs.replace("``" , "\"" ).replace("\'\'" , "\"" )
if not self.keep_accents:
outputs = unicodedata.normalize("NFKD" , outputs )
outputs = ''''''.join([c for c in outputs if not unicodedata.combining(c )] )
if self.do_lower_case:
outputs = outputs.lower()
return outputs
def _tokenize( self : str , text : str ) -> List[str]:
'''simple docstring'''
text = self.preprocess_text(text )
pieces = self.sp_model.encode(text , out_type=str )
new_pieces = []
for piece in pieces:
if len(piece ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
cur_pieces = cur_pieces[1:]
else:
cur_pieces[0] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(cur_pieces )
else:
new_pieces.append(piece )
return new_pieces
def a_ ( self : Tuple , a__ : Dict ) -> Optional[Any]:
'''simple docstring'''
return self.sp_model.PieceToId(UpperCAmelCase__ )
def a_ ( self : Any , a__ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.sp_model.IdToPiece(UpperCAmelCase__ )
def a_ ( self : int , a__ : List[Any] ) -> Dict:
'''simple docstring'''
_A = []
_A = ''''''
_A = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCAmelCase__ ) + token
_A = True
_A = []
else:
current_sub_tokens.append(UpperCAmelCase__ )
_A = False
out_string += self.sp_model.decode(UpperCAmelCase__ )
return out_string.strip()
def a_ ( self : List[str] , a__ : List[int] , a__ : Optional[List[int]] = None ) -> List[str]:
'''simple docstring'''
_A = [self.sep_token_id]
_A = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a_ ( self : List[Any] , a__ : List[int] , a__ : Optional[List[int]] = None , a__ : bool = False ) -> List[str]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ )
if token_ids_a is not None:
return [1] + ([0] * len(UpperCAmelCase__ )) + [1] + ([0] * len(UpperCAmelCase__ )) + [1]
return [1] + ([0] * len(UpperCAmelCase__ )) + [1]
def a_ ( self : Optional[int] , a__ : List[int] , a__ : Optional[List[int]] = None ) -> List[Any]:
'''simple docstring'''
_A = [self.sep_token_id]
_A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a_ ( self : Any , a__ : str , a__ : Optional[str] = None ) -> List[Any]:
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_A = os.path.join(
UpperCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase__ , "wb" ) as fi:
_A = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase__ )
return (out_vocab_file,) | 700 |
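# The `preprocess_text` normalization above is independent of SentencePiece, so its effect
# is easy to show standalone: whitespace collapsing, quote unification, NFKD accent
# stripping and optional lower-casing. A sketch mirroring the method, not the tokenizer:
import unicodedata

def preprocess_text(inputs, remove_space=True, keep_accents=False, do_lower_case=True):
    outputs = " ".join(inputs.strip().split()) if remove_space else inputs
    outputs = outputs.replace("``", '"').replace("''", '"')
    if not keep_accents:
        outputs = unicodedata.normalize("NFKD", outputs)
        outputs = "".join(c for c in outputs if not unicodedata.combining(c))
    if do_lower_case:
        outputs = outputs.lower()
    return outputs

assert preprocess_text("  Héllo ``World''  ") == 'hello "world"'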
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 621 | 0 |
"""simple docstring"""
from typing import Any
def a__ ( input_list ) -> list[Any]:
if not input_list:
return []
result = [input_list.count(value ) for value in input_list]
y = max(result ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(result ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701 |
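# Quick check of the mode helper defined above: every value tied for the highest count
# is returned, in sorted order, and an empty input yields an empty list.
assert a__([2, 3, 4, 5, 3, 4, 2]) == [2, 3, 4]
assert a__([1, 1, 2]) == [1]
assert a__([]) == []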
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
def __init__( self : Optional[int] , p_stop : float=0.0_1 , max_length : int=10_00 ) -> None:
'''simple docstring'''
self.p_stop = p_stop
self.max_length = max_length
def __iter__( self : Any ) -> Optional[Any]:
'''simple docstring'''
count = 0
stop = False
while not stop and count < self.max_length:
yield count
count += 1
stop = random.random() < self.p_stop
class snake_case ( unittest.TestCase):
def check_batch_sampler_shards( self , batch_sampler , expected , split_batches=False , even_batches=True ) -> Union[str, Any]:
'''simple docstring'''
batch_sampler_shards = [
BatchSamplerShard(batch_sampler , 2 , i , split_batches=split_batches , even_batches=even_batches )
for i in range(2 )
]
batch_sampler_lists = [list(batch_sampler_shard ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(shard ) for shard in batch_sampler_shards] , [len(e ) for e in expected] )
self.assertListEqual(batch_sampler_lists , expected )
def a_ ( self : List[Any] ) -> str:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batches.
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
# num_processes batches.
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ )
def a_ ( self : int ) -> int:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size.
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
def a_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batches.
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
# num_processes batches.
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[[0, 1]], []]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
def a_ ( self : List[str] ) -> str:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size.
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[[0, 1]], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
def a_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
_A = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
_A = [BatchSamplerShard(a__ , 2 , a__ , even_batches=a__ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def check_iterable_dataset_shards( self , dataset , seed , batch_size , drop_last=False , num_processes=2 , split_batches=False ) -> str:
'''simple docstring'''
random.seed(seed )
reference = list(dataset )
iterable_dataset_shards = [
IterableDatasetShard(
dataset , batch_size=batch_size , drop_last=drop_last , num_processes=num_processes , process_index=i , split_batches=split_batches , )
for i in range(num_processes )
]
iterable_dataset_lists = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(seed )
iterable_dataset_lists.append(list(iterable_dataset_shard ) )
shard_batch_size = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shards should have the same length, a round multiple of shard_batch_size
first_list = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(l ) , len(first_list ) )
self.assertTrue(len(l ) % shard_batch_size == 0 )
observed = []
for idx in range(0 , len(first_list ) , shard_batch_size ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(reference ) < len(observed ):
reference += reference
self.assertListEqual(observed , reference[: len(observed )] )
def a_ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
seed = 42
dataset = RandomIterableDataset()
self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=False )
self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=False )
self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=True )
self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=True )
# Edge case with a very small dataset
dataset = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=False )
self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=False )
self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=True )
self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=True )
def a_ ( self : List[str] ) -> Dict:
'''simple docstring'''
batch_sampler = BatchSampler(range(16 ) , batch_size=4 , drop_last=False )
new_batch_sampler = SkipBatchSampler(batch_sampler , 2 )
self.assertListEqual(list(new_batch_sampler ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a_ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
dataloader = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a_ ( self : int ) -> Optional[int]:
'''simple docstring'''
dataloader = DataLoader(list(range(16 ) ) , batch_size=4 )
new_dataloader = skip_first_batches(dataloader , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a_ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
dataloader = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(dataloader ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(dataloader ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def a_ ( self : int ) -> int:
'''simple docstring'''
Accelerator()
dataloader = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(dataloader ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(dataloader ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) | 621 | 0 |
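# What the `BatchSamplerShard` tests above verify, in miniature: with the default
# `even_batches=True`, both processes must yield the same number of batches, so the tail
# wraps around to the start of the sample sequence. A plain-Python emulation of that
# indexing pattern (illustrative only, not accelerate's implementation):
import itertools

def shard_batches(samples, batch_size, num_shards=2):
    num_batches = -(-len(samples) // batch_size)   # ceil: total batches in the dataset
    per_shard = -(-num_batches // num_shards)      # ceil: batches each shard must yield
    # Wrap the sample sequence around so every shard can be filled completely.
    cycled = list(itertools.islice(itertools.cycle(samples), per_shard * num_shards * batch_size))
    shards = [[] for _ in range(num_shards)]
    for b in range(per_shard * num_shards):
        shards[b % num_shards].append(cycled[b * batch_size : (b + 1) * batch_size])
    return shards

assert shard_batches(list(range(21)), 3) == [
    [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
    [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]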
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class snake_case :
def __init__( self : Dict , a__ : Tuple , a__ : Optional[Any]=13 , a__ : Tuple=7 , a__ : Tuple=False , a__ : Tuple=True , a__ : Union[str, Any]=False , a__ : Optional[int]=True , a__ : Tuple=33 , a__ : Union[str, Any]=32 , a__ : List[Any]=5 , a__ : Optional[Any]=4 , a__ : List[str]=37 , a__ : Optional[int]="gelu" , a__ : Tuple=0.1 , a__ : str=0.1 , a__ : str=5_12 , a__ : Optional[int]=16 , a__ : List[Any]=2 , a__ : str=0.0_2 , a__ : int=3 , a__ : str=4 , a__ : List[Any]=None , ) -> int:
'''simple docstring'''
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_input_mask
_A = use_token_type_ids
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = type_sequence_label_size
_A = initializer_range
_A = num_labels
_A = num_choices
_A = scope
def a_ ( self : Any ) -> Dict:
'''simple docstring'''
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = None
if self.use_input_mask:
_A = random_attention_mask([self.batch_size, self.seq_length] )
_A = None
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A = ids_tensor([self.batch_size] , self.num_choices )
_A = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def a_ ( self : List[str] ) -> str:
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def a_ ( self : Union[str, Any] , a__ : str , a__ : List[Any] , a__ : List[str] , a__ : Dict , a__ : int , a__ : Optional[Any] ) -> int:
'''simple docstring'''
_A = EsmModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_A = model(__lowerCamelCase , attention_mask=__lowerCamelCase )
_A = model(__lowerCamelCase )
_A = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a_ ( self : Tuple , a__ : Dict , a__ : Optional[Any] , a__ : Union[str, Any] , a__ : str , a__ : Optional[Any] , a__ : str ) -> int:
'''simple docstring'''
_A = EsmForMaskedLM(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_A = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a_ ( self : Optional[int] , a__ : Tuple , a__ : str , a__ : List[str] , a__ : List[str] , a__ : int , a__ : List[str] ) -> List[Any]:
'''simple docstring'''
_A = self.num_labels
_A = EsmForTokenClassification(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_A = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a_ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
(
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = self.prepare_config_and_inputs()
inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class snake_case ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase):
__UpperCamelCase = False
__UpperCamelCase = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
__UpperCamelCase = ()
__UpperCamelCase = (
{
'feature-extraction': EsmModel,
'fill-mask': EsmForMaskedLM,
'text-classification': EsmForSequenceClassification,
'token-classification': EsmForTokenClassification,
'zero-shot': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase = True
def a_ ( self : Any ) -> List[Any]:
'''simple docstring'''
_A = EsmModelTester(self )
_A = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 )
def a_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def a_ ( self : Tuple ) -> Any:
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def a_ ( self : int ) -> Any:
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_A[0].position_embedding_type = type
self.model_tester.create_and_check_model(*_A )
def a_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def a_ ( self : List[str] ) -> Tuple:
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
def a_ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = EsmModel.from_pretrained(model_name )
self.assertIsNotNone(_A )
def a_ ( self : List[Any] ) -> str:
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()[0]
_A = EsmEmbeddings(config=__lowerCamelCase )
_A = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
_A = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
_A = create_position_ids_from_input_ids(__lowerCamelCase , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__lowerCamelCase , __lowerCamelCase ) ) )
def a_ ( self : Dict ) -> int:
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()[0]
_A = EsmEmbeddings(config=__lowerCamelCase )
_A = torch.empty(2 , 4 , 30 )
_A = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
_A = torch.as_tensor([expected_single_positions, expected_single_positions] )
_A = embeddings.create_position_ids_from_inputs_embeds(__lowerCamelCase )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__lowerCamelCase , __lowerCamelCase ) ) )
@unittest.skip("Esm does not support embedding resizing" )
def a_ ( self : List[str] ) -> str:
'''simple docstring'''
pass
@unittest.skip("Esm does not support embedding resizing" )
def a_ ( self : Any ) -> int:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def a_ ( self : str ) -> Optional[Any]:
'''simple docstring'''
pass
@require_torch
class snake_case ( lowerCamelCase__):
@slow
def a_ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
with torch.no_grad():
_A = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
model.eval()
_A = torch.tensor([[0, 1, 2, 3, 4, 5]] )
_A = model(__lowerCamelCase )[0]
_A = 33
_A = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , __lowerCamelCase )
_A = torch.tensor(
[[[8.9_2_1_5, -1_0.5_8_9_8, -6.4_6_7_1], [-6.3_9_6_7, -1_3.9_1_1_4, -1.1_2_1_2], [-7.7_8_1_2, -1_3.9_5_1_6, -3.7_4_0_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCamelCase , atol=1E-4 ) )
@slow
def a_ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
with torch.no_grad():
_A = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
model.eval()
_A = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
_A = model(__lowerCamelCase )[0]
# compare the actual values for a slice.
_A = torch.tensor(
[[[0.1_4_4_4, 0.5_4_1_3, 0.3_2_4_8], [0.3_0_3_4, 0.0_0_5_3, 0.3_1_0_8], [0.3_2_2_8, -0.2_4_9_9, 0.3_4_1_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCamelCase , atol=1E-4 ) ) | 702 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
a_ = False
class snake_case ( unittest.TestCase):
pass
@nightly
@require_torch_gpu
class snake_case ( unittest.TestCase):
def a_ ( self : Optional[int] ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : Tuple ) -> Any:
'''simple docstring'''
_A = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
_A = torch.manual_seed(0 )
_A = pipe.dual_guided(
prompt="first prompt" , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a__ )
_A = VersatileDiffusionPipeline.from_pretrained(a__ , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_A = generator.manual_seed(0 )
_A = pipe.dual_guided(
prompt="first prompt" , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def a_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
_A = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_A = "cyberpunk 2077"
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
_A = torch.manual_seed(0 )
_A = pipe.dual_guided(
prompt=a__ , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
_A = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_A = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_A = "A painting of a squirrel eating a burger "
_A = torch.manual_seed(0 )
_A = pipe.text_to_image(
prompt=a__ , generator=a__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
_A = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_A = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_A = pipe.image_variation(a__ , generator=a__ , output_type="numpy" ).images
_A = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_A = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 | 621 | 0 |
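# The dual-guided test above relies on two properties worth isolating: a re-seeded
# generator reproduces the same inputs, and save/reload must leave the forward pass
# unchanged. A minimal sketch of that check with a dummy torch module standing in for
# the diffusion pipeline (illustrative only):
import numpy as np
import torch

torch.manual_seed(0)
model = torch.nn.Linear(4, 4)

gen = torch.Generator().manual_seed(0)
out_a = model(torch.randn(2, 4, generator=gen)).detach().numpy()

gen = torch.Generator().manual_seed(0)  # re-seeding reproduces the same input batch
out_b = model(torch.randn(2, 4, generator=gen)).detach().numpy()

assert np.abs(out_a - out_b).sum() < 1E-5, "Models don't have the same forward pass"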
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class snake_case ( UpperCAmelCase_):
__UpperCamelCase = 'altclip_text_model'
def __init__( self : Union[str, Any] , a__ : Dict=25_00_02 , a__ : Tuple=10_24 , a__ : Dict=24 , a__ : Union[str, Any]=16 , a__ : Union[str, Any]=40_96 , a__ : Union[str, Any]="gelu" , a__ : Any=0.1 , a__ : int=0.1 , a__ : Dict=5_14 , a__ : int=1 , a__ : List[str]=0.0_2 , a__ : Tuple=0.0_2 , a__ : int=1E-0_5 , a__ : int=1 , a__ : Optional[int]=0 , a__ : List[str]=2 , a__ : Any="absolute" , a__ : Any=True , a__ : str=7_68 , **a__ : Tuple , ) -> List[str]:
'''simple docstring'''
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = hidden_act
_A = intermediate_size
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = initializer_range
_A = initializer_factor
_A = layer_norm_eps
_A = position_embedding_type
_A = use_cache
_A = project_dim
class snake_case ( UpperCAmelCase_):
__UpperCamelCase = 'altclip_vision_model'
def __init__( self : Union[str, Any] , a__ : Any=7_68 , a__ : List[str]=30_72 , a__ : Tuple=5_12 , a__ : int=12 , a__ : List[Any]=12 , a__ : Tuple=3 , a__ : List[Any]=2_24 , a__ : Optional[Any]=32 , a__ : Dict="quick_gelu" , a__ : List[Any]=1E-5 , a__ : Tuple=0.0 , a__ : List[Any]=0.0_2 , a__ : Optional[int]=1.0 , **a__ : Dict , ) -> Tuple:
'''simple docstring'''
super().__init__(**_lowercase )
_A = hidden_size
_A = intermediate_size
_A = projection_dim
_A = num_hidden_layers
_A = num_attention_heads
_A = num_channels
_A = patch_size
_A = image_size
_A = initializer_range
_A = initializer_factor
_A = attention_dropout
_A = layer_norm_eps
_A = hidden_act
@classmethod
def a_ ( cls : Optional[int] , a__ : Union[str, os.PathLike] , **a__ : int ) -> int:
'''simple docstring'''
cls._set_token_in_kwargs(_lowercase )
_A , _A = cls.get_config_dict(_lowercase , **_lowercase )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get("model_type" ) == "altclip":
_A = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_lowercase , **_lowercase )
class snake_case ( UpperCAmelCase_):
__UpperCamelCase = 'altclip'
__UpperCamelCase = True
def __init__( self : List[str] , a__ : Optional[Any]=None , a__ : Optional[Any]=None , a__ : Tuple=7_68 , a__ : List[str]=2.6_5_9_2 , **a__ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_A = kwargs.pop("text_config_dict" , _lowercase )
_A = kwargs.pop("vision_config_dict" , _lowercase )
super().__init__(**_lowercase )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
_A = {}
# This is the complete result when using `text_config_dict`.
_A = AltCLIPTextConfig(**_lowercase ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
_A = (
F"""`{key}` is found in both `text_config_dict` and `text_config` but with different values. """
F"""The value `text_config_dict[\"{key}\"]` will be used instead."""
)
# If inferred from default argument values (just to be super careful)
else:
_A = (
F"""`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The """
F"""value `text_config[\"{key}\"]` will be overriden."""
)
logger.warning(_lowercase )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
_A = {}
# This is the complete result when using `vision_config_dict`.
_A = AltCLIPVisionConfig(**_lowercase ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
_A = {
str(_lowercase ): value for key, value in _vision_config_dict["id2label"].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
_A = (
F"""`{key}` is found in both `vision_config_dict` and `vision_config` but with different """
F"""values. The value `vision_config_dict[\"{key}\"]` will be used instead."""
)
# If inferred from default argument values (just to be super careful)
else:
_A = (
F"""`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. """
F"""The value `vision_config[\"{key}\"]` will be overriden."""
)
logger.warning(_lowercase )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
_A = {}
logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values." )
if vision_config is None:
_A = {}
logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values." )
_A = AltCLIPTextConfig(**_lowercase )
_A = AltCLIPVisionConfig(**_lowercase )
_A = projection_dim
_A = logit_scale_init_value
_A = 1.0
@classmethod
def a_ ( cls : str , a__ : AltCLIPTextConfig , a__ : AltCLIPVisionConfig , **a__ : Any ) -> int:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_lowercase )
def a_ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
_A = copy.deepcopy(self.__dict__ )
_A = self.text_config.to_dict()
_A = self.vision_config.to_dict()
_A = self.__class__.model_type
return output | 703 |
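# The __init__ above resolves conflicts between `text_config_dict`/`vision_config_dict`
# and the plain config dicts by letting the explicit dict win and warning on every
# differing key. The core of that merge, reduced to plain dicts (a sketch of the
# pattern, not the Hugging Face code path):
import logging
from typing import Optional

logger = logging.getLogger(__name__)

def merge_config(base: dict, override: Optional[dict]) -> dict:
    if override is None:
        return dict(base)
    for key, value in override.items():
        if key in base and base[key] != value:
            logger.warning("`%s` differs between the two dicts; the override value will be used.", key)
    merged = dict(base)
    merged.update(override)
    return merged

print(merge_config({"hidden_size": 768, "num_hidden_layers": 12}, {"hidden_size": 1024}))
# -> {'hidden_size': 1024, 'num_hidden_layers': 12}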
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
a_ = logging.get_logger(__name__)
@dataclass
class snake_case :
__UpperCamelCase = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys())})
__UpperCamelCase = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'})
__UpperCamelCase = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__UpperCamelCase = field(
default=_UpperCamelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'})
def a_ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
_A = self.task_name.lower()
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'train'
__UpperCamelCase = 'dev'
__UpperCamelCase = 'test'
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
def __init__( self : Optional[int] , a__ : GlueDataTrainingArguments , a__ : PreTrainedTokenizerBase , a__ : Optional[int] = None , a__ : Union[str, Split] = Split.train , a__ : Optional[str] = None , ) -> Tuple:
'''simple docstring'''
warnings.warn(
"This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
"library. You can have a look at this example script for pointers: "
"https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" , a__ , )
_A = args
_A = glue_processors[args.task_name]()
_A = glue_output_modes[args.task_name]
if isinstance(a__ , a__ ):
try:
_A = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
# Load data features from cache or dataset file
_A = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
_A = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_A , _A = label_list[2], label_list[1]
_A = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_A = cached_features_file + ".lock"
with FileLock(a__ ):
if os.path.exists(a__ ) and not args.overwrite_cache:
_A = time.time()
_A = torch.load(a__ )
logger.info(
F"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
else:
logger.info(F"""Creating features from dataset file at {args.data_dir}""" )
if mode == Split.dev:
_A = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
_A = self.processor.get_test_examples(args.data_dir )
else:
_A = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
_A = examples[:limit_length]
_A = glue_convert_examples_to_features(
a__ , a__ , max_length=args.max_seq_length , label_list=a__ , output_mode=self.output_mode , )
_A = time.time()
torch.save(self.features , a__ )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self : List[Any] ) -> Any:
'''simple docstring'''
return len(self.features )
def __getitem__( self : Tuple , a__ : Union[str, Any] ) -> InputFeatures:
'''simple docstring'''
return self.features[i]
def a_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
return self.label_list | 621 | 0 |
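# The dataset above guards its feature cache with a `FileLock` so that in distributed
# training only the first process builds the cache while the others block and then load
# it. The pattern in isolation (a sketch; assumes the `filelock` package is installed):
import os
import torch
from filelock import FileLock

def load_or_build(cache_path, build_fn, overwrite=False):
    with FileLock(cache_path + ".lock"):
        if os.path.exists(cache_path) and not overwrite:
            return torch.load(cache_path)  # another process already built the cache
        features = build_fn()
        torch.save(features, cache_path)
        return features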
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"WavLMForAudioFrameClassification",
"WavLMForCTC",
"WavLMForSequenceClassification",
"WavLMForXVector",
"WavLMModel",
"WavLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 704 |
"""simple docstring"""
def bfs( graph , s , t , parent ) -> bool:
# Return True if there is a node that has not been iterated.
visited = [False] * len(graph )
queue = []
queue.append(s )
visited[s] = True
while queue:
u = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(ind )
visited[ind] = True
parent[ind] = u
return visited[t]
def ford_fulkerson( graph , source , sink ) -> int:
# This array is filled by BFS to store the augmenting path
parent = [-1] * len(graph )
max_flow = 0
while bfs(graph , source , sink , parent ):
path_flow = float("Inf" )
s = sink
while s != source:
# Find the minimum residual capacity along the selected path
path_flow = min(path_flow , graph[parent[s]][s] )
s = parent[s]
max_flow += path_flow
v = sink
while v != source:
u = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
v = parent[v]
return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source , sink = 0, 5
print(ford_fulkerson(graph, source, sink)) | 621 | 0 |
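# Note that `ford_fulkerson` consumes `graph` in place, turning it into a residual
# capacity matrix, so copy the capacities before reusing them. The network above is the
# classic CLRS example, whose maximum flow is 23:
import copy

capacities = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
assert ford_fulkerson(copy.deepcopy(capacities), 0, 5) == 23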
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class snake_case :
def __init__( self : Union[str, Any] , a__ : str , a__ : int = 13 , a__ : int = 64 , a__ : int = 2 , a__ : int = 3 , a__ : int = 3 , a__ : bool = True , a__ : bool = True , a__ : int = 1_28 , a__ : int=[16, 32, 64, 1_28] , a__ : int = 7 , a__ : int = 4 , a__ : int = 37 , a__ : str = "gelu" , a__ : float = 0.1 , a__ : float = 0.1 , a__ : int = 10 , a__ : float = 0.0_2 , a__ : int = 2 , a__ : int = 1 , a__ : int = 1_28 , a__ : List[int] = [2, 2, 2, 2] , a__ : int = 2 , a__ : int = 2 , ) -> Tuple:
'''simple docstring'''
_A = parent
_A = batch_size
_A = image_size
_A = patch_size
_A = num_channels
_A = is_training
_A = use_labels
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = type_sequence_label_size
_A = initializer_range
_A = encoder_stride
_A = num_attention_outputs
_A = embed_dim
_A = embed_dim + 1
_A = resolution
_A = depths
_A = hidden_sizes
_A = dim
_A = mlp_expansion_ratio
def a_ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
_A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = self.get_config()
return config, pixel_values, labels
def a_ ( self : Any ) -> Optional[int]:
'''simple docstring'''
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def a_ ( self : Tuple , a__ : int , a__ : int , a__ : int ) -> Dict:
'''simple docstring'''
_A = TFEfficientFormerModel(config=A_ )
_A = model(A_ , training=A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a_ ( self : List[str] , a__ : int , a__ : List[Any] , a__ : List[str] ) -> Any:
'''simple docstring'''
_A = self.type_sequence_label_size
_A = TFEfficientFormerForImageClassification(A_ )
_A = model(A_ , labels=A_ , training=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_A = 1
_A = TFEfficientFormerForImageClassification(A_ )
_A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_A = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a_ ( self : List[Any] ) -> str:
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values , labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class snake_case ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase):
__UpperCamelCase = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
__UpperCamelCase = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def a_ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
_A = TFEfficientFormerModelTester(self )
_A = ConfigTester(
self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def a_ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="EfficientFormer does not use inputs_embeds" )
def a_ ( self : int ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason="EfficientFormer does not support input and output embeddings" )
def a_ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
pass
def a_ ( self : Tuple ) -> List[str]:
'''simple docstring'''
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1] , expected_arg_names )
def a_ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(a__ : Dict , a__ : int , a__ : List[str] ):
_A = model_class(A_ )
_A = model(**self._prepare_for_class(A_ , A_ ) , training=A_ )
_A = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_A = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(A_ ) , A_ )
if hasattr(self.model_tester , "encoder_seq_length" ):
_A = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , "chunk_length" ) and self.model_tester.chunk_length > 1:
_A = seq_length * self.model_tester.chunk_length
else:
_A = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
_A = outputs.decoder_hidden_states
                self.assertIsInstance(A_ , (list, tuple) )
self.assertEqual(len(A_ ) , A_ )
_A = getattr(self.model_tester , "seq_length" , A_ )
_A = getattr(self.model_tester , "decoder_seq_length" , A_ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = True
check_hidden_states_output(A_ , A_ , A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A = True
check_hidden_states_output(A_ , A_ , A_ )
def a_ ( self : Union[str, Any] , a__ : Tuple , a__ : Optional[int] , a__ : Dict=False ) -> Any:
'''simple docstring'''
_A = super()._prepare_for_class(A_ , A_ , return_labels=A_ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def a_ ( self : Any ) -> int:
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
@unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" )
def a_ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A_ )
def a_ ( self : Any ) -> Tuple:
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
@slow
def a_ ( self : Dict ) -> Dict:
'''simple docstring'''
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = TFEfficientFormerModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def a_ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
_A = True
_A = getattr(self.model_tester , "seq_length" , A_ )
_A = getattr(self.model_tester , "encoder_seq_length" , A_ )
_A = getattr(self.model_tester , "key_length" , A_ )
_A = getattr(self.model_tester , "chunk_length" , A_ )
if chunk_length is not None and hasattr(self.model_tester , "num_hashes" ):
_A = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
_A = True
_A = False
_A = True
_A = model_class(A_ )
_A = model(**self._prepare_for_class(A_ , A_ ) , training=A_ )
_A = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(A_ ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_A = True
_A = model_class(A_ )
_A = model(**self._prepare_for_class(A_ , A_ ) , training=A_ )
_A = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(A_ ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def a_ ( self : Dict ) -> int:
'''simple docstring'''
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
_A = model_class(A_ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
_A = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=A_ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
_A = model(A_ )
self.assertTrue(outputs_dict is not None )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_tf
@require_vision
class snake_case ( unittest.TestCase):
@cached_property
def a_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return (
EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300" )
if is_vision_available()
else None
)
@slow
def a_ ( self : Tuple ) -> Tuple:
'''simple docstring'''
_A = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300" )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=A_ , return_tensors="tf" )
# forward pass
_A = model(**A_ , training=A_ )
# verify the logits
_A = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , A_ )
        _A = tf.constant([-0.0555, 0.4825, -0.0852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) )
@slow
def a_ ( self : Any ) -> Any:
'''simple docstring'''
_A = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"snap-research/efficientformer-l1-300" )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=A_ , return_tensors="tf" )
# forward pass
_A = model(**A_ , training=A_ )
# verify the logits
_A = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , A_ )
        _A = tf.constant([-0.1312, 0.4353, -1.0499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) )
| 705 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def rename_key(state_dict , old , new ):
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys(state_dict ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
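# Hedged demo (added for illustration, not part of the original script): only keys under
# "backbone.0.body" are rewritten; everything else passes through unchanged.
def _demo_rename_backbone_keys():
    sd = OrderedDict({"backbone.0.body.conv1.weight": 0, "class_embed.bias": 1} )
    out = rename_backbone_keys(sd )
    assert list(out ) == ["backbone.conv_encoder.model.conv1.weight", "class_embed.bias"]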
def read_in_q_k_v(state_dict , is_panoptic=False ):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""encoder.layers.{i}.self_attn.q_proj.weight"""] = in_proj_weight[:256, :]
        state_dict[f"""encoder.layers.{i}.self_attn.q_proj.bias"""] = in_proj_bias[:256]
        state_dict[f"""encoder.layers.{i}.self_attn.k_proj.weight"""] = in_proj_weight[256:512, :]
        state_dict[f"""encoder.layers.{i}.self_attn.k_proj.bias"""] = in_proj_bias[256:512]
        state_dict[f"""encoder.layers.{i}.self_attn.v_proj.weight"""] = in_proj_weight[-256:, :]
        state_dict[f"""encoder.layers.{i}.self_attn.v_proj.bias"""] = in_proj_bias[-256:]
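# Hedged sketch (added): PyTorch's fused `in_proj_weight` of shape (3 * 256, 256) stacks
# the query, key and value projections row-wise, which is why the slices above are
# [:256], [256:512] and [-256:].
def _demo_qkv_split():
    fused = torch.randn(3 * 256 , 256 )
    q_w, k_w, v_w = fused[:256, :], fused[256:512, :], fused[-256:, :]
    assert q_w.shape == k_w.shape == v_w.shape == (256, 256)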
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name , pytorch_dump_folder_path ):
_A = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
_A = "resnet101"
if "dc5" in model_name:
_A = True
_A = "panoptic" in model_name
if is_panoptic:
_A = 250
else:
_A = 91
_A = "huggingface/label-files"
_A = "coco-detection-id2label.json"
_A = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) )
    _A = {int(k): v for k, v in idalabel.items()}
_A = idalabel
_A = {v: k for k, v in idalabel.items()}
# load image processor
_A = "coco_panoptic" if is_panoptic else "coco_detection"
_A = ConditionalDetrImageProcessor(format=__lowercase )
# prepare image
_A = prepare_img()
_A = image_processor(images=__lowercase , return_tensors="pt" )
_A = encoding["pixel_values"]
logger.info(f"""Converting model {model_name}...""" )
# load original model from torch hub
_A = torch.hub.load("DeppMeng/ConditionalDETR" , __lowercase , pretrained=__lowercase ).eval()
_A = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
_A = "conditional_detr." + src
rename_key(__lowercase , __lowercase , __lowercase )
_A = rename_backbone_keys(__lowercase )
# query, key and value matrices need special treatment
read_in_q_k_v(__lowercase , is_panoptic=__lowercase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_A = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
_A = state_dict.pop(__lowercase )
_A = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_A = state_dict.pop(__lowercase )
_A = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
_A = state_dict.pop(__lowercase )
_A = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
_A = state_dict.pop(__lowercase )
_A = val
# finally, create HuggingFace model and load state dict
_A = ConditionalDetrForSegmentation(__lowercase ) if is_panoptic else ConditionalDetrForObjectDetection(__lowercase )
model.load_state_dict(__lowercase )
model.eval()
model.push_to_hub(repo_id=__lowercase , organization="DepuMeng" , commit_message="Add model" )
# verify our conversion
_A = conditional_detr(__lowercase )
_A = model(__lowercase )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(__lowercase ).mkdir(exist_ok=__lowercase )
model.save_pretrained(__lowercase )
image_processor.save_pretrained(__lowercase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path) | 621 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_clipseg': [
'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPSegConfig',
'CLIPSegTextConfig',
'CLIPSegVisionConfig',
],
'processing_clipseg': ['CLIPSegProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPSegModel',
'CLIPSegPreTrainedModel',
'CLIPSegTextModel',
'CLIPSegVisionModel',
'CLIPSegForImageSegmentation',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
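# Hedged note (added): with this lazy-module pattern the heavy torch-backed submodules
# are only imported on first attribute access, via _LazyModule.__getattr__.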
| 706 |
"""simple docstring"""
import random
def partition(a , left_index , right_index ) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1 , right_index ):
        if a[j] < pivot:
            a[i] , a[j] = a[j], a[i]
            i += 1
    a[left_index] , a[i - 1] = a[i - 1], a[left_index]
    return i - 1
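# Hedged worked example (added for illustration): partitioning [3, 1, 4, 1, 5] around
# the pivot 3 yields [1, 1, 3, 4, 5] and returns the pivot's final index, 2.
def _demo_partition():
    a = [3, 1, 4, 1, 5]
    assert partition(a , 0 , len(a ) ) == 2
    assert a == [1, 1, 3, 4, 5]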
def quick_sort_random(a , left , right ) -> None:
    if left < right:
        pivot = random.randint(left , right - 1 )
        a[pivot] , a[left] = (
            a[left],
            a[pivot],
        ) # switches the pivot with the left most bound
        pivot_index = partition(a , left , right )
        quick_sort_random(
            a , left , pivot_index ) # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a , pivot_index + 1 , right ) # recursive quicksort to the right of the pivot point
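# Hedged usage sketch (added): sorting a list in place with the randomized quicksort above.
def _demo_quick_sort_random():
    data = [5, 2, 9, 1]
    quick_sort_random(data , 0 , len(data ) )
    assert data == [1, 2, 5, 9]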
def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n" ).strip()
    arr = [int(item ) for item in user_input.split("," )]
    quick_sort_random(arr , 0 , len(arr ) )
    print(arr )
if __name__ == "__main__":
main() | 621 | 0 |
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class snake_case ( unittest.TestCase):
def a_ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
_A = "hf-internal-testing/tiny-random-t5"
_A = AutoTokenizer.from_pretrained(__UpperCamelCase )
_A = AutoModelForSeqaSeqLM.from_pretrained(__UpperCamelCase )
_A = tokenizer("This is me" , return_tensors="pt" )
_A = model.to_bettertransformer()
self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
_A = model.generate(**__UpperCamelCase )
_A = model.reverse_bettertransformer()
self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase )
_A = AutoModelForSeqaSeqLM.from_pretrained(__UpperCamelCase )
self.assertFalse(
any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
_A = model_reloaded.generate(**__UpperCamelCase )
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase ) )
def a_ ( self : List[str] ) -> Any:
'''simple docstring'''
_A = "hf-internal-testing/tiny-random-t5"
_A = AutoModelForSeqaSeqLM.from_pretrained(__UpperCamelCase )
_A = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(__UpperCamelCase ):
model.save_pretrained(__UpperCamelCase )
_A = model.reverse_bettertransformer()
model.save_pretrained(__UpperCamelCase ) | 707 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class snake_case ( SequenceFeatureExtractor):
__UpperCamelCase = ['input_features']
def __init__( self : int , a__ : Optional[Any]=80 , a__ : Optional[int]=1_60_00 , a__ : int=1_60 , a__ : Union[str, Any]=30 , a__ : Tuple=4_00 , a__ : List[Any]=0.0 , a__ : Optional[Any]=False , **a__ : List[Any] , ) -> str:
'''simple docstring'''
super().__init__(
feature_size=a__ , sampling_rate=a__ , padding_value=a__ , return_attention_mask=a__ , **a__ , )
_A = n_fft
_A = hop_length
_A = chunk_length
_A = chunk_length * sampling_rate
_A = self.n_samples // hop_length
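        # Hedged note (added): with the Whisper-style defaults this is 30 s * 16000 Hz
        # = 480000 samples per chunk and 480000 // 160 = 3000 mel frames.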
_A = sampling_rate
_A = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=a__ , min_frequency=0.0 , max_frequency=8_0_0_0.0 , sampling_rate=a__ , norm="slaney" , mel_scale="slaney" , )
def a_ ( self : int , a__ : np.array ) -> np.ndarray:
'''simple docstring'''
_A = spectrogram(
a__ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , )
_A = log_spec[:, :-1]
_A = np.maximum(a__ , log_spec.max() - 8.0 )
_A = (log_spec + 4.0) / 4.0
return log_spec
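        # Hedged note (added): the clamp above caps the dynamic range at 8 orders of
        # magnitude below the loudest bin, and (log_spec + 4.0) / 4.0 rescales typical
        # log-mel values into roughly [-1, 1].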
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def a_ ( a__ : List[np.ndarray] , a__ : List[np.ndarray] , a__ : float = 0.0 ) -> List[np.ndarray]:
'''simple docstring'''
if attention_mask is not None:
_A = np.array(a__ , np.intaa )
_A = []
for vector, length in zip(a__ , attention_mask.sum(-1 ) ):
_A = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
_A = padding_value
normed_input_values.append(a__ )
else:
_A = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
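    # Hedged worked example (added): the unpadded part of each vector is standardized,
    # e.g. [1., 2., 3., 4.] -> [-1.3416, -0.4472, 0.4472, 1.3416] (mean 0, variance 1).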
def __call__( self : Optional[int] , a__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a__ : bool = True , a__ : Optional[int] = None , a__ : Optional[Union[str, TensorType]] = None , a__ : Optional[bool] = None , a__ : Optional[str] = "max_length" , a__ : Optional[int] = None , a__ : Optional[int] = None , a__ : Optional[bool] = None , **a__ : Dict , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_A = isinstance(a__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
_A = is_batched_numpy or (
isinstance(a__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_A = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(a__ , np.ndarray ):
_A = np.asarray(a__ , dtype=np.floataa )
elif isinstance(a__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_A = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_A = [np.asarray([raw_speech] ).T]
_A = BatchFeature({"input_features": raw_speech} )
# convert into correct format for padding
_A = self.pad(
a__ , padding=a__ , max_length=max_length if max_length else self.n_samples , truncation=a__ , pad_to_multiple_of=a__ , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
_A = self.zero_mean_unit_var_norm(
padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
_A = np.stack(padded_inputs["input_features"] , axis=0 )
# make sure list is in array format
_A = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 )
_A = [self._np_extract_fbank_features(a__ ) for waveform in input_features[0]]
if isinstance(input_features[0] , a__ ):
_A = [np.asarray(a__ , dtype=np.floataa ) for feature in input_features]
else:
_A = input_features
if return_attention_mask:
            # rescale from sample (480000) to feature (3000)
_A = padded_inputs["attention_mask"][:, :: self.hop_length]
if return_tensors is not None:
_A = padded_inputs.convert_to_tensors(a__ )
return padded_inputs
def a_ ( self : Dict ) -> Dict[str, Any]:
'''simple docstring'''
_A = copy.deepcopy(self.__dict__ )
_A = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output | 621 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 708 |
"""simple docstring"""
from __future__ import annotations
def median_of_two_arrays(numsa: list[float] , numsb: list[float] ) -> float:
    all_numbers = sorted(numsa + numsb )
    div , mod = divmod(len(all_numbers ) , 2 )
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
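# Hedged examples (added for illustration): the middle element for an odd combined
# length, the mean of the two middle elements for an even one.
def _demo_median_of_two_arrays():
    assert median_of_two_arrays([1.0, 3.0] , [2.0] ) == 2.0
    assert median_of_two_arrays([1.0, 2.0] , [3.0, 4.0] ) == 2.5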
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ = [float(x) for x in input("Enter the elements of first array: ").split()]
a_ = [float(x) for x in input("Enter the elements of second array: ").split()]
print(f'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''') | 621 | 0 |
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU." )
    parser.add_argument(
        "--pretrained_model_config" , type=str , default="roberta-base" , help="The model config to use. Note that we don't copy the model's weights, only the config!" , )
    parser.add_argument(
        "--tokenizer" , type=str , default="unigram-tokenizer-wikitext" , help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size." , )
    parser.add_argument(
        "--per_replica_batch_size" , type=int , default=8 , help="Batch size per TPU core." , )
    parser.add_argument(
        "--no_tpu" , action="store_true" , help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances." , )
    parser.add_argument(
        "--tpu_name" , type=str , help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs." , default="local" , )
    parser.add_argument(
        "--tpu_zone" , type=str , help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes." , )
    parser.add_argument(
        "--gcp_project" , type=str , help="Google cloud project name. Only used for non-Colab TPU nodes." )
    parser.add_argument(
        "--bfloat16" , action="store_true" , help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU." , )
    parser.add_argument(
        "--train_dataset" , type=str , help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket." , )
    parser.add_argument(
        "--shuffle_buffer_size" , type=int , default=2**18 , help="Size of the shuffle buffer (in samples)" , )
    parser.add_argument(
        "--eval_dataset" , type=str , help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket." , )
    parser.add_argument(
        "--num_epochs" , type=int , default=1 , help="Number of epochs to train for." , )
    parser.add_argument(
        "--learning_rate" , type=float , default=1E-4 , help="Learning rate to use for training." , )
    parser.add_argument(
        "--weight_decay_rate" , type=float , default=1E-3 , help="Weight decay rate to use for training." , )
    parser.add_argument(
        "--max_length" , type=int , default=512 , help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py" , )
    parser.add_argument(
        "--mlm_probability" , type=float , default=0.15 , help="Fraction of tokens to mask during training." , )
    parser.add_argument("--output_dir" , type=str , required=True , help="Path to save model checkpoints to." )
    parser.add_argument("--hub_model_id" , type=str , help="Model ID to upload to on the Hugging Face Hub." )
    args = parser.parse_args()
    return args
def initialize_tpu(args ):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local." )
    tf.config.experimental_connect_to_cluster(tpu )
    tf.tpu.experimental.initialize_tpu_system(tpu )
    return tpu
def count_samples(file_list ):
    num_samples = 0
    for file in file_list:
        filename = file.split("/" )[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord" , filename ).group(1 )
        sample_count = int(sample_count )
        num_samples += sample_count
    return num_samples
def prepare_dataset(records , decode_fn , mask_fn , batch_size , shuffle , shuffle_buffer_size=None ):
    num_samples = count_samples(records )
    dataset = tf.data.Dataset.from_tensor_slices(records )
    if shuffle:
        dataset = dataset.shuffle(len(dataset ) )
    dataset = tf.data.TFRecordDataset(dataset , num_parallel_reads=AUTO )
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples ) )
    dataset = dataset.map(decode_fn , num_parallel_calls=AUTO )
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size )
    dataset = dataset.batch(batch_size , drop_remainder=True )
    dataset = dataset.map(mask_fn , num_parallel_calls=AUTO )
    dataset = dataset.prefetch(AUTO )
    return dataset
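# Hedged note (added): masking runs after batching, so the collator's tf_mask_tokens
# operates on whole batches rather than being traced once per individual example.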
def main(args ):
    if not args.no_tpu:
        tpu = initialize_tpu(args )
        strategy = tf.distribute.TPUStrategy(tpu )
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0" )
    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16" )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer )
    config = AutoConfig.from_pretrained(args.pretrained_model_config )
    config.vocab_size = tokenizer.vocab_size
    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset , "*.tfrecord" ) )
    if not training_records:
        raise ValueError(f"""No .tfrecord files found in {args.train_dataset}.""" )
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset , "*.tfrecord" ) )
    if not eval_records:
        raise ValueError(f"""No .tfrecord files found in {args.eval_dataset}.""" )
    num_train_samples = count_samples(training_records )
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs
    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config )
        model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer , schedule = create_optimizer(
            num_train_steps=total_train_steps , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer , metrics=["accuracy"] )
    def decode_fn(example ):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
        }
        return tf.io.parse_single_example(example , features )
    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer , mlm_probability=args.mlm_probability , mlm=True , return_tensors="tf" )
    def mask_with_collator(batch ):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"] , tf.bool )
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"] , batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"] , vocab_size=len(tokenizer ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=special_tokens_mask , )
        return batch
    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=True , shuffle_buffer_size=args.shuffle_buffer_size , )
    eval_dataset = prepare_dataset(
        eval_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=False , )
    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=tokenizer ) )
    model.fit(
        train_dataset , validation_data=eval_dataset , epochs=args.num_epochs , callbacks=callbacks , )
    model.save_pretrained(args.output_dir )
if __name__ == "__main__":
    args = parse_args()
main(args) | 709 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
"Salesforce/blip-vqa-capfit-large": (
"https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-base": (
"https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-large": (
"https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
),
"Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
"Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
"Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
"Salesforce/blip-itm-large-flikr": (
"https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
),
}
class snake_case ( PretrainedConfig):
__UpperCamelCase = 'blip_text_model'
def __init__( self : int , a__ : List[str]=3_05_24 , a__ : List[str]=7_68 , a__ : List[Any]=7_68 , a__ : int=30_72 , a__ : List[str]=7_68 , a__ : Dict=12 , a__ : Optional[int]=8 , a__ : Optional[Any]=5_12 , a__ : List[Any]="gelu" , a__ : Optional[Any]=1E-1_2 , a__ : Any=0.0 , a__ : int=0.0 , a__ : Dict=0.0_2 , a__ : Optional[Any]=3_05_22 , a__ : Any=2 , a__ : int=0 , a__ : Union[str, Any]=1_02 , a__ : Tuple=True , a__ : Optional[int]=True , **a__ : Any , ) -> List[Any]:
'''simple docstring'''
super().__init__(
pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , sep_token_id=a__ , **a__ , )
_A = vocab_size
_A = hidden_size
_A = encoder_hidden_size
_A = intermediate_size
_A = projection_dim
_A = hidden_dropout_prob
_A = num_hidden_layers
_A = num_attention_heads
_A = max_position_embeddings
_A = layer_norm_eps
_A = hidden_act
_A = initializer_range
_A = attention_probs_dropout_prob
_A = is_decoder
_A = use_cache
@classmethod
def a_ ( cls : Optional[Any] , a__ : Union[str, os.PathLike] , **a__ : Optional[Any] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(a__ )
_A , _A = cls.get_config_dict(a__ , **a__ )
# get the text config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
_A = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(a__ , **a__ )
class snake_case ( PretrainedConfig):
__UpperCamelCase = 'blip_vision_model'
def __init__( self : Optional[Any] , a__ : Any=7_68 , a__ : List[str]=30_72 , a__ : str=5_12 , a__ : Any=12 , a__ : int=12 , a__ : int=3_84 , a__ : Tuple=16 , a__ : str="gelu" , a__ : Tuple=1E-5 , a__ : List[str]=0.0 , a__ : List[Any]=1E-1_0 , **a__ : int , ) -> List[str]:
'''simple docstring'''
super().__init__(**a__ )
_A = hidden_size
_A = intermediate_size
_A = projection_dim
_A = num_hidden_layers
_A = num_attention_heads
_A = patch_size
_A = image_size
_A = initializer_range
_A = attention_dropout
_A = layer_norm_eps
_A = hidden_act
@classmethod
def a_ ( cls : Any , a__ : Union[str, os.PathLike] , **a__ : int ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(a__ )
_A , _A = cls.get_config_dict(a__ , **a__ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
_A = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(a__ , **a__ )
class snake_case ( PretrainedConfig):
__UpperCamelCase = 'blip'
__UpperCamelCase = True
def __init__( self : List[Any] , a__ : Optional[int]=None , a__ : str=None , a__ : List[str]=5_12 , a__ : Any=2.6_5_9_2 , a__ : str=2_56 , **a__ : Optional[int] , ) -> Dict:
'''simple docstring'''
super().__init__(**a__ )
if text_config is None:
_A = {}
logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values." )
if vision_config is None:
_A = {}
logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values." )
_A = BlipTextConfig(**a__ )
_A = BlipVisionConfig(**a__ )
_A = self.vision_config.hidden_size
_A = projection_dim
_A = logit_scale_init_value
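        # Hedged note (added): the default 2.6592 is approximately ln(1 / 0.07), the
        # CLIP-style temperature initialization for the image-text logit scale.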
_A = 1.0
_A = 0.0_2
_A = image_text_hidden_size
@classmethod
def a_ ( cls : Tuple , a__ : BlipTextConfig , a__ : BlipVisionConfig , **a__ : Optional[int] ) -> str:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **a__ )
def a_ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
_A = copy.deepcopy(self.__dict__ )
_A = self.text_config.to_dict()
_A = self.vision_config.to_dict()
_A = self.__class__.model_type
return output | 621 | 0 |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class snake_case :
def __init__( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
_A = {}
def a_ ( self : Optional[int] , a__ : Tuple , a__ : Optional[Any] , a__ : Optional[Any]=1 ) -> Tuple:
'''simple docstring'''
if self.graph.get(lowerCamelCase_ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
_A = [[w, v]]
if not self.graph.get(lowerCamelCase_ ):
_A = []
def a_ ( self : str ) -> Tuple:
'''simple docstring'''
return list(self.graph )
def a_ ( self : int , a__ : str , a__ : Any ) -> Optional[int]:
'''simple docstring'''
if self.graph.get(lowerCamelCase_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCamelCase_ )
def a_ ( self : Union[str, Any] , a__ : Optional[Any]=-2 , a__ : Tuple=-1 ) -> int:
'''simple docstring'''
if s == d:
return []
_A = []
_A = []
if s == -2:
_A = list(self.graph )[0]
stack.append(lowerCamelCase_ )
visited.append(lowerCamelCase_ )
_A = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCamelCase_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCamelCase_ ) != 0:
_A = stack[len(lowerCamelCase_ ) - 1]
else:
_A = ss
# check if se have reached the starting point
if len(lowerCamelCase_ ) == 0:
return visited
def a_ ( self : Optional[int] , a__ : List[str]=-1 ) -> Union[str, Any]:
'''simple docstring'''
if c == -1:
_A = floor(random() * 1_00_00 ) + 10
for i in range(lowerCamelCase_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_02 ) + 1 ):
_A = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCamelCase_ , lowerCamelCase_ , 1 )
def a_ ( self : Optional[int] , a__ : Dict=-2 ) -> List[str]:
'''simple docstring'''
_A = deque()
_A = []
if s == -2:
_A = list(self.graph )[0]
d.append(lowerCamelCase_ )
visited.append(lowerCamelCase_ )
while d:
_A = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def a_ ( self : Union[str, Any] , a__ : Dict ) -> int:
'''simple docstring'''
_A = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def a_ ( self : Optional[int] , a__ : List[str] ) -> List[str]:
'''simple docstring'''
return len(self.graph[u] )
def a_ ( self : Optional[int] , a__ : List[str]=-2 ) -> Optional[Any]:
'''simple docstring'''
_A = []
_A = []
if s == -2:
_A = list(self.graph )[0]
stack.append(lowerCamelCase_ )
visited.append(lowerCamelCase_ )
_A = s
_A = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_A = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowerCamelCase_ ) != 0:
_A = stack[len(lowerCamelCase_ ) - 1]
else:
_A = ss
# check if se have reached the starting point
if len(lowerCamelCase_ ) == 0:
return sorted_nodes
def a_ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_A = []
_A = []
_A = list(self.graph )[0]
stack.append(lowerCamelCase_ )
visited.append(lowerCamelCase_ )
_A = -2
_A = []
_A = s
_A = False
_A = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_A = len(lowerCamelCase_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_A = True
if len(lowerCamelCase_ ) != 0:
_A = stack[len(lowerCamelCase_ ) - 1]
else:
_A = False
indirect_parents.append(lowerCamelCase_ )
_A = s
_A = ss
# check if se have reached the starting point
if len(lowerCamelCase_ ) == 0:
return list(lowerCamelCase_ )
def a_ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
_A = []
_A = []
_A = list(self.graph )[0]
stack.append(lowerCamelCase_ )
visited.append(lowerCamelCase_ )
_A = -2
_A = []
_A = s
_A = False
_A = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_A = len(lowerCamelCase_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_A = True
if len(lowerCamelCase_ ) != 0:
_A = stack[len(lowerCamelCase_ ) - 1]
else:
_A = False
indirect_parents.append(lowerCamelCase_ )
_A = s
_A = ss
# check if se have reached the starting point
if len(lowerCamelCase_ ) == 0:
return False
def a_ ( self : Union[str, Any] , a__ : Optional[int]=-2 , a__ : Any=-1 ) -> int:
'''simple docstring'''
_A = time()
self.dfs(lowerCamelCase_ , lowerCamelCase_ )
_A = time()
return end - begin
def a_ ( self : Tuple , a__ : Optional[Any]=-2 ) -> Tuple:
'''simple docstring'''
_A = time()
self.bfs(lowerCamelCase_ )
_A = time()
return end - begin
class snake_case :
def __init__( self : Any ) -> Any:
'''simple docstring'''
_A = {}
def a_ ( self : Tuple , a__ : List[Any] , a__ : Union[str, Any] , a__ : str=1 ) -> List[Any]:
'''simple docstring'''
if self.graph.get(lowerCamelCase_ ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
_A = [[w, v]]
# add the other way
if self.graph.get(lowerCamelCase_ ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
_A = [[w, u]]
def a_ ( self : Union[str, Any] , a__ : List[Any] , a__ : int ) -> List[str]:
'''simple docstring'''
if self.graph.get(lowerCamelCase_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCamelCase_ )
# the other way round
if self.graph.get(lowerCamelCase_ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowerCamelCase_ )
def a_ ( self : Tuple , a__ : List[str]=-2 , a__ : Union[str, Any]=-1 ) -> Union[str, Any]:
'''simple docstring'''
if s == d:
return []
_A = []
_A = []
if s == -2:
_A = list(self.graph )[0]
stack.append(lowerCamelCase_ )
visited.append(lowerCamelCase_ )
_A = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCamelCase_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCamelCase_ ) != 0:
_A = stack[len(lowerCamelCase_ ) - 1]
else:
_A = ss
# check if se have reached the starting point
if len(lowerCamelCase_ ) == 0:
return visited
def a_ ( self : List[str] , a__ : Tuple=-1 ) -> Any:
'''simple docstring'''
if c == -1:
_A = floor(random() * 1_00_00 ) + 10
for i in range(lowerCamelCase_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_02 ) + 1 ):
_A = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCamelCase_ , lowerCamelCase_ , 1 )
def a_ ( self : Tuple , a__ : Optional[int]=-2 ) -> Dict:
'''simple docstring'''
_A = deque()
_A = []
if s == -2:
_A = list(self.graph )[0]
d.append(lowerCamelCase_ )
visited.append(lowerCamelCase_ )
while d:
_A = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def a_ ( self : int , a__ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
return len(self.graph[u] )
def a_ ( self : Optional[Any] ) -> int:
'''simple docstring'''
_A = []
_A = []
_A = list(self.graph )[0]
stack.append(lowerCamelCase_ )
visited.append(lowerCamelCase_ )
_A = -2
_A = []
_A = s
_A = False
_A = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_A = len(lowerCamelCase_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_A = True
if len(lowerCamelCase_ ) != 0:
_A = stack[len(lowerCamelCase_ ) - 1]
else:
_A = False
indirect_parents.append(lowerCamelCase_ )
_A = s
_A = ss
# check if se have reached the starting point
if len(lowerCamelCase_ ) == 0:
return list(lowerCamelCase_ )
def a_ ( self : str ) -> Tuple:
'''simple docstring'''
_A = []
_A = []
_A = list(self.graph )[0]
stack.append(lowerCamelCase_ )
visited.append(lowerCamelCase_ )
_A = -2
_A = []
_A = s
_A = False
_A = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_A = len(lowerCamelCase_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_A = True
if len(lowerCamelCase_ ) != 0:
_A = stack[len(lowerCamelCase_ ) - 1]
else:
_A = False
indirect_parents.append(lowerCamelCase_ )
_A = s
_A = ss
# check if se have reached the starting point
if len(lowerCamelCase_ ) == 0:
return False
def a_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
return list(self.graph )
def a_ ( self : Union[str, Any] , a__ : List[str]=-2 , a__ : List[str]=-1 ) -> List[str]:
'''simple docstring'''
_A = time()
self.dfs(lowerCamelCase_ , lowerCamelCase_ )
_A = time()
return end - begin
def a_ ( self : Dict , a__ : Any=-2 ) -> str:
'''simple docstring'''
_A = time()
self.bfs(lowerCamelCase_ )
_A = time()
return end - begin
| 710 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class snake_case ( unittest.TestCase , ToolTesterMixin):
    def setUp(self ) -> None:
        '''simple docstring'''
        self.tool = load_tool("text-classification" )
        self.tool.setup()
        self.remote_tool = load_tool("text-classification" , remote=True )
    def test_exact_match_arg(self ) -> None:
        '''simple docstring'''
        result = self.tool("That's quite cool" , ["positive", "negative"] )
        self.assertEqual(result , "positive" )
    def test_exact_match_arg_remote(self ) -> None:
        '''simple docstring'''
        result = self.remote_tool("That's quite cool" , ["positive", "negative"] )
        self.assertEqual(result , "positive" )
    def test_exact_match_kwarg(self ) -> None:
        '''simple docstring'''
        result = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
        self.assertEqual(result , "positive" )
    def test_exact_match_kwarg_remote(self ) -> None:
        '''simple docstring'''
        result = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
        self.assertEqual(result , "positive" ) | 621 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
a_ = logging.get_logger(__name__)
class snake_case ( BaseImageProcessor):
__UpperCamelCase = ['''pixel_values''']
def __init__( self : List[str] , a__ : bool = True , a__ : Optional[Dict[str, int]] = None , a__ : PILImageResampling = PILImageResampling.BILINEAR , a__ : bool = True , a__ : Dict[str, int] = None , a__ : bool = True , a__ : Union[int, float] = 1 / 2_55 , a__ : bool = True , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , **a__ : Optional[Any] , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
_A = size if size is not None else {"shortest_edge": 2_56}
_A = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
_A = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
_A = get_size_dict(UpperCamelCase__ , param_name="crop_size" )
_A = do_resize
_A = size
_A = resample
_A = do_center_crop
_A = crop_size
_A = do_rescale
_A = rescale_factor
_A = do_normalize
_A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_A = image_std if image_std is not None else IMAGENET_STANDARD_STD
def a_ ( self : Any , a__ : np.ndarray , a__ : Dict[str, int] , a__ : PILImageResampling = PILImageResampling.BICUBIC , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : List[str] , ) -> List[str]:
'''simple docstring'''
_A = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
_A = get_resize_output_image_size(UpperCamelCase__ , size=size["shortest_edge"] , default_to_square=UpperCamelCase__ )
return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def a_ ( self : Optional[Any] , a__ : np.ndarray , a__ : Dict[str, int] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Optional[int] , ) -> List[str]:
'''simple docstring'''
_A = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(UpperCamelCase__ , size=(size["height"], size["width"]) , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def a_ ( self : Optional[int] , a__ : np.ndarray , a__ : float , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : List[Any] ) -> Dict:
'''simple docstring'''
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def a_ ( self : Any , a__ : np.ndarray , a__ : Union[float, List[float]] , a__ : Union[float, List[float]] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Tuple , ) -> Any:
'''simple docstring'''
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def a_ ( self : List[str] , a__ : ImageInput , a__ : Optional[bool] = None , a__ : Dict[str, int] = None , a__ : PILImageResampling = None , a__ : bool = None , a__ : Dict[str, int] = None , a__ : Optional[bool] = None , a__ : Optional[float] = None , a__ : Optional[bool] = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[str, TensorType]] = None , a__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **a__ : Tuple , ) -> Dict:
'''simple docstring'''
_A = do_resize if do_resize is not None else self.do_resize
_A = size if size is not None else self.size
_A = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
_A = resample if resample is not None else self.resample
_A = do_center_crop if do_center_crop is not None else self.do_center_crop
_A = crop_size if crop_size is not None else self.crop_size
_A = get_size_dict(UpperCamelCase__ , param_name="crop_size" )
_A = do_rescale if do_rescale is not None else self.do_rescale
_A = rescale_factor if rescale_factor is not None else self.rescale_factor
_A = do_normalize if do_normalize is not None else self.do_normalize
_A = image_mean if image_mean is not None else self.image_mean
_A = image_std if image_std is not None else self.image_std
_A = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
_A = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
_A = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
if do_center_crop:
_A = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images]
if do_rescale:
_A = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_normalize:
_A = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]
_A = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
_A = {"pixel_values": images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
def a_ ( self : Optional[Any] , a__ : int , a__ : List[Tuple] = None ) -> str:
'''simple docstring'''
_A = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(UpperCamelCase__ ):
_A = target_sizes.numpy()
_A = []
for idx in range(len(UpperCamelCase__ ) ):
_A = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=UpperCamelCase__ )
_A = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCamelCase__ )
else:
_A = logits.argmax(dim=1 )
_A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation | 711 |
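The resize/center-crop/rescale/normalize chain in the image processor above reduces to a few numpy steps; a minimal self-contained sketch (values illustrative) is:

import numpy as np

def preprocess(img, mean, std, scale=1 / 255):
    x = img.astype(np.float32) * scale          # rescale to [0, 1]
    x = (x - np.array(mean)) / np.array(std)    # per-channel normalize
    return np.transpose(x, (2, 0, 1))           # HWC -> CHW

img = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
print(preprocess(img, mean=[0.5] * 3, std=[0.5] * 3).shape)  # (3, 224, 224)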
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase):
__UpperCamelCase = StableDiffusionInpaintPipeline
__UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
__UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__UpperCamelCase = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__UpperCamelCase = frozenset([])
def a_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
_A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a__ , )
_A = PNDMScheduler(skip_prk_steps=a__ )
torch.manual_seed(0 )
_A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
_A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , )
_A = CLIPTextModel(a__ )
_A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_A = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def a_ ( self : Optional[Any] , a__ : List[str] , a__ : Tuple=0 ) -> int:
'''simple docstring'''
_A = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
_A = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A = Image.fromarray(np.uinta(a__ ) ).convert("RGB" ).resize((64, 64) )
_A = Image.fromarray(np.uinta(image + 4 ) ).convert("RGB" ).resize((64, 64) )
if str(a__ ).startswith("mps" ):
_A = torch.manual_seed(a__ )
else:
_A = torch.Generator(device=a__ ).manual_seed(a__ )
_A = {
"prompt": "A painting of a squirrel eating a burger",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def a_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_A = "cpu" # ensure determinism for the device-dependent torch.Generator
_A = self.get_dummy_components()
_A = StableDiffusionInpaintPipeline(**a__ )
_A = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_A = self.get_dummy_inputs(a__ )
_A = sd_pipe(**a__ ).images
_A = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A = np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def a_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase):
def a_ ( self : List[Any] ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
_A = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench.npy" )
_A = "stabilityai/stable-diffusion-2-inpainting"
_A = StableDiffusionInpaintPipeline.from_pretrained(a__ , safety_checker=a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
_A = "Face of a yellow cat, high resolution, sitting on a park bench"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type="np" , )
_A = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def a_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
_A = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench_fp16.npy" )
_A = "stabilityai/stable-diffusion-2-inpainting"
_A = StableDiffusionInpaintPipeline.from_pretrained(
a__ , torch_dtype=torch.floataa , safety_checker=a__ , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
_A = "Face of a yellow cat, high resolution, sitting on a park bench"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type="np" , )
_A = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def a_ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
_A = "stabilityai/stable-diffusion-2-inpainting"
_A = PNDMScheduler.from_pretrained(a__ , subfolder="scheduler" )
_A = StableDiffusionInpaintPipeline.from_pretrained(
a__ , safety_checker=a__ , scheduler=a__ , torch_dtype=torch.floataa , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_A = "Face of a yellow cat, high resolution, sitting on a park bench"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , num_inference_steps=2 , output_type="np" , )
_A = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.6_5 * 10**9 | 621 | 0 |
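Consolidated inference sketch matching the slow tests above (requires diffusers, a CUDA GPU, and network access; image and mask_image are equal-size PIL images whose white mask pixels mark the region to repaint):

import torch
from diffusers import StableDiffusionInpaintPipeline

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
).to("cuda")
generator = torch.manual_seed(0)
# result = pipe("Face of a yellow cat", image=image, mask_image=mask_image,
#               generator=generator).images[0]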
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case :
def __init__( self : int , a__ : List[Any] , a__ : Optional[Any]=13 , a__ : List[Any]=7 , a__ : List[str]=True , a__ : Any=True , a__ : List[Any]=True , a__ : Dict=True , a__ : str=99 , a__ : List[str]=32 , a__ : Union[str, Any]=5 , a__ : Dict=4 , a__ : List[str]=37 , a__ : Tuple="gelu" , a__ : Optional[int]=0.1 , a__ : Union[str, Any]=0.1 , a__ : Any=1_28 , a__ : int=32 , a__ : Dict=16 , a__ : Union[str, Any]=2 , a__ : List[str]=0.0_2 , a__ : Union[str, Any]=3 , a__ : int=4 , a__ : Tuple=None , ) -> Tuple:
'''simple docstring'''
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_input_mask
_A = use_token_type_ids
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = type_sequence_label_size
_A = initializer_range
_A = num_labels
_A = num_choices
_A = scope
def a_ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = None
if self.use_input_mask:
_A = random_attention_mask([self.batch_size, self.seq_length] )
_A = None
if self.use_token_type_ids:
_A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A = None
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A = ids_tensor([self.batch_size] , self.num_choices )
_A = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a_ ( self : Any ) -> List[Any]:
'''simple docstring'''
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
def a_ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
(
(
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) ,
) = self.prepare_config_and_inputs()
_A = True
_A = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_A = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def a_ ( self : Dict , a__ : Dict , a__ : int , a__ : Union[str, Any] , a__ : str , a__ : str , a__ : int , a__ : int ) -> Optional[int]:
'''simple docstring'''
_A = NezhaModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
_A = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
_A = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a_ ( self : Optional[int] , a__ : Dict , a__ : Union[str, Any] , a__ : List[Any] , a__ : Optional[Any] , a__ : str , a__ : int , a__ : List[Any] , a__ : Union[str, Any] , a__ : Tuple , ) -> Optional[int]:
'''simple docstring'''
_A = True
_A = NezhaModel(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )
_A = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , )
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a_ ( self : Optional[Any] , a__ : Any , a__ : List[str] , a__ : Any , a__ : int , a__ : Union[str, Any] , a__ : Any , a__ : int ) -> int:
'''simple docstring'''
_A = NezhaForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a_ ( self : Any , a__ : Union[str, Any] , a__ : Dict , a__ : Any , a__ : str , a__ : Optional[Any] , a__ : Tuple , a__ : Any ) -> Dict:
'''simple docstring'''
_A = NezhaForNextSentencePrediction(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def a_ ( self : Any , a__ : List[str] , a__ : int , a__ : Any , a__ : Union[str, Any] , a__ : List[Any] , a__ : Union[str, Any] , a__ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_A = NezhaForPreTraining(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , next_sentence_label=lowerCAmelCase_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def a_ ( self : Dict , a__ : Dict , a__ : Dict , a__ : Any , a__ : Optional[int] , a__ : Any , a__ : int , a__ : Any ) -> Any:
'''simple docstring'''
_A = NezhaForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a_ ( self : Optional[int] , a__ : Tuple , a__ : Dict , a__ : Tuple , a__ : List[str] , a__ : Dict , a__ : str , a__ : int ) -> Optional[int]:
'''simple docstring'''
_A = self.num_labels
_A = NezhaForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a_ ( self : Tuple , a__ : int , a__ : List[str] , a__ : List[Any] , a__ : Tuple , a__ : List[str] , a__ : int , a__ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
_A = self.num_labels
_A = NezhaForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a_ ( self : Tuple , a__ : Optional[Any] , a__ : Tuple , a__ : Optional[Any] , a__ : Optional[Any] , a__ : Dict , a__ : Tuple , a__ : Optional[Any] ) -> Tuple:
'''simple docstring'''
_A = self.num_choices
_A = NezhaForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_A = self.prepare_config_and_inputs()
(
(
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) ,
) = config_and_inputs
_A = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class snake_case ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase):
__UpperCamelCase = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
__UpperCamelCase = (
{
'feature-extraction': NezhaModel,
'fill-mask': NezhaForMaskedLM,
'question-answering': NezhaForQuestionAnswering,
'text-classification': NezhaForSequenceClassification,
'token-classification': NezhaForTokenClassification,
'zero-shot': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase = True
def a_ ( self : int , a__ : str , a__ : int , a__ : int=False ) -> int:
'''simple docstring'''
_A = super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
if return_labels:
if model_class in get_values(lowerCAmelCase_ ):
_A = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase_ )
_A = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
return inputs_dict
def a_ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
_A = NezhaModelTester(self )
_A = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def a_ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def a_ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def a_ ( self : str ) -> Optional[int]:
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowerCAmelCase_ )
def a_ ( self : str ) -> Tuple:
'''simple docstring'''
(
(
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) ,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
_A = None
self.model_tester.create_and_check_model_as_decoder(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )
def a_ ( self : List[Any] ) -> int:
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase_ )
def a_ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase_ )
def a_ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*lowerCAmelCase_ )
def a_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase_ )
def a_ ( self : Any ) -> List[str]:
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase_ )
def a_ ( self : Tuple ) -> int:
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase_ )
def a_ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase_ )
@slow
def a_ ( self : int ) -> Any:
'''simple docstring'''
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = NezhaModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@slow
@require_torch_gpu
def a_ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
_A = True
_A = model_class(config=lowerCAmelCase_ )
_A = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
_A = torch.jit.trace(
lowerCAmelCase_ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , "bert.pt" ) )
_A = torch.jit.load(os.path.join(lowerCAmelCase_ , "bert.pt" ) , map_location=lowerCAmelCase_ )
loaded(inputs_dict["input_ids"].to(lowerCAmelCase_ ) , inputs_dict["attention_mask"].to(lowerCAmelCase_ ) )
@require_torch
class snake_case ( unittest.TestCase):
@slow
def a_ ( self : Any ) -> str:
'''simple docstring'''
_A = NezhaModel.from_pretrained("sijunhe/nezha-cn-base" )
_A = torch.tensor([[0, 1, 2, 3, 4, 5]] )
_A = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
_A = torch.Size((1, 6, 7_68) )
self.assertEqual(output.shape , lowerCAmelCase_ )
_A = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1E-4 ) )
@slow
def a_ ( self : int ) -> Tuple:
'''simple docstring'''
_A = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base" )
_A = torch.tensor([[0, 1, 2, 3, 4, 5]] )
_A = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
_A = torch.Size((1, 6, 2_11_28) )
self.assertEqual(output.shape , lowerCAmelCase_ )
_A = torch.tensor(
[[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1E-4 ) ) | 712 |
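The tester above builds random inputs with helpers like ids_tensor; a simplified sketch of what those helpers produce is:

import torch

def ids_tensor(shape, vocab_size):
    # uniform random token ids; the shape used above is (batch, seq_len)
    return torch.randint(0, vocab_size, shape, dtype=torch.long)

input_ids = ids_tensor((13, 7), vocab_size=99)
attention_mask = (torch.rand(13, 7) > 0.5).long()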
"""simple docstring"""
def a__ ( __lowercase , __lowercase ) -> int:
while a != 0:
_A , _A = b % a, a
return b
def a__ ( __lowercase , __lowercase ) -> int:
if gcd(__lowercase , __lowercase ) != 1:
_A = f"""mod inverse of {a!r} and {m!r} does not exist"""
raise ValueError(__lowercase )
_A , _A , _A = 1, 0, a
_A , _A , _A = 0, 1, m
while va != 0:
_A = ua // va
_A , _A , _A , _A , _A , _A = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m | 621 | 0 |
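Quick self-contained check of the identity this extended-Euclid snippet computes; since Python 3.8, pow exposes the same modular inverse directly:

assert pow(3, -1, 10) == 7          # 3 * 7 = 21 = 1 (mod 10)
assert (3 * pow(3, -1, 10)) % 10 == 1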
"""simple docstring"""
import os
def a__ ( ) -> Union[str, Any]:
_A = os.path.dirname(os.path.realpath(a_ ) )
_A = os.path.join(a_ , "triangle.txt" )
with open(a_ ) as f:
_A = f.readlines()
_A = []
for line in triangle:
_A = []
for number in line.strip().split(" " ):
numbers_from_line.append(int(a_ ) )
a.append(a_ )
for i in range(1 , len(a_ ) ):
for j in range(len(a[i] ) ):
_A = a[i - 1][j] if j != len(a[i - 1] ) else 0
_A = a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(a_ , a_ )
return max(a[-1] )
if __name__ == "__main__":
print(solution()) | 713 |
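The same top-down accumulation on a tiny triangle, self-contained: each cell adds the best of its two parents, and the answer is the maximum of the last row.

rows = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
for i in range(1, len(rows)):
    for j in range(len(rows[i])):
        left = rows[i - 1][j - 1] if j > 0 else 0
        right = rows[i - 1][j] if j < len(rows[i - 1]) else 0
        rows[i][j] += max(left, right)
print(max(rows[-1]))  # 23, i.e. 3 + 7 + 4 + 9 (the Project Euler #18 example)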
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class snake_case ( _UpperCamelCase):
def __init__( self : List[Any] , a__ : Any ) -> Any:
'''simple docstring'''
_A = data
def __iter__( self : List[str] ) -> str:
'''simple docstring'''
for element in self.data:
yield element
def a__ ( __lowercase=True ) -> Tuple:
_A = Accelerator(even_batches=__lowercase )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase = False ) -> Union[str, Any]:
if iterable:
_A = DummyIterableDataset(torch.as_tensor(range(__lowercase ) ) )
else:
_A = TensorDataset(torch.as_tensor(range(__lowercase ) ) )
_A = DataLoader(__lowercase , batch_size=__lowercase )
_A = accelerator.prepare(__lowercase )
return dl
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Dict:
_A = create_dataloader(accelerator=__lowercase , dataset_size=__lowercase , batch_size=__lowercase )
_A = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def a__ ( ) -> List[str]:
_A = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
__lowercase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
__lowercase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def a__ ( ) -> List[Any]:
_A = create_accelerator(even_batches=__lowercase )
verify_dataloader_batch_sizes(
__lowercase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
__lowercase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def a__ ( ) -> int:
_A = create_accelerator(even_batches=__lowercase )
_A = torch.nn.Linear(1 , 1 )
_A = accelerator.prepare(__lowercase )
_A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 )
_A = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(__lowercase ):
_A = ddp_model(batch[0].float() )
_A = output.sum()
loss.backward()
batch_idxs.append(__lowercase )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def a__ ( __lowercase ) -> List[str]:
with warnings.catch_warnings(record=__lowercase ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , __lowercase )
assert "only supported for multi-GPU" in str(w[-1].message )
def a__ ( ) -> Tuple:
_A = True
_A = False
_A = create_accelerator(even_batches=__lowercase )
_A = torch.nn.Linear(1 , 1 )
_A = accelerator.prepare(__lowercase )
_A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 )
_A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ):
_A = train_dl.batch_sampler.even_batches
_A = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def a__ ( ) -> int:
_A = True
_A = False
_A = create_accelerator(even_batches=__lowercase )
_A = torch.nn.Linear(1 , 1 )
_A = accelerator.prepare(__lowercase )
create_dataloader(__lowercase , dataset_size=3 , batch_size=1 , iterable=__lowercase )
_A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings("ignore" )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ):
_A = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def a__ ( ) -> Optional[Any]:
_A = create_accelerator()
_A = torch.nn.Linear(1 , 1 )
_A = accelerator.prepare(__lowercase )
create_dataloader(__lowercase , dataset_size=3 , batch_size=1 , iterable=__lowercase )
with warnings.catch_warnings(record=__lowercase ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ):
pass
assert issubclass(w[-1].category , __lowercase )
assert "only supported for map-style datasets" in str(w[-1].message )
def a__ ( ) -> Optional[Any]:
_A = create_accelerator()
accelerator.print("Test that even_batches variable ensures uniform batches across processes" )
test_default_ensures_even_batch_sizes()
accelerator.print("Run tests with even_batches disabled" )
test_can_disable_even_batches()
accelerator.print("Test joining uneven inputs" )
test_can_join_uneven_inputs()
accelerator.print("Test overriding even_batches when joining uneven inputs" )
test_join_can_override_even_batches()
accelerator.print("Test overriding even_batches for mixed dataloader types" )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print("Test join with non DDP distributed raises warning" )
_A = accelerator.state.distributed_type
_A = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(__lowercase )
_A = original_state
if __name__ == "__main__":
main() | 621 | 0 |
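Conceptual illustration of what even_batches changes in the tests above (the real sharding lives in accelerate's BatchSamplerShard): with 2 processes, 3 samples, and batch_size=1, padding wraps around so both ranks see two batches.

samples = [0, 1, 2]
padded = [samples[i % len(samples)] for i in range(4)]  # [0, 1, 2, 0]
rank0, rank1 = padded[0::2], padded[1::2]               # [0, 2] and [1, 0]
# with even_batches=False, rank1 would instead get just [1]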
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a_ = {
"configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
"tokenization_biogpt": ["BioGptTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BioGptForCausalLM",
"BioGptForTokenClassification",
"BioGptForSequenceClassification",
"BioGptModel",
"BioGptPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 714 |
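Minimal sketch of the lazy-import pattern used above: placed in a module file, attribute access triggers the real import via PEP 562's module-level __getattr__ (transformers' _LazyModule adds submodule and dir() handling on top):

import importlib

_import_structure = {"math": ["sqrt"]}  # illustrative mapping

def __getattr__(name):
    for module_name, names in _import_structure.items():
        if name in names:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)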
"""simple docstring"""
class snake_case :
def __init__( self : Optional[int] , a__ : List[Any] , a__ : List[str] , a__ : Tuple ) -> Optional[Any]:
'''simple docstring'''
_A = None
_A = None
_A = graph
self._normalize_graph(a__ , a__ )
_A = len(a__ )
_A = None
def a_ ( self : str , a__ : List[str] , a__ : List[Any] ) -> Dict:
'''simple docstring'''
if sources is int:
_A = [sources]
if sinks is int:
_A = [sinks]
if len(a__ ) == 0 or len(a__ ) == 0:
return
_A = sources[0]
_A = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(a__ ) > 1 or len(a__ ) > 1:
_A = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
_A = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_A = max_input_flow
_A = 0
_A = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_A = max_input_flow
_A = size - 1
def a_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
if self.maximum_flow_algorithm is None:
raise Exception("You need to set maximum flow algorithm before." )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def a_ ( self : List[Any] , a__ : Optional[Any] ) -> str:
'''simple docstring'''
_A = algorithm(self )
class snake_case :
def __init__( self : List[str] , a__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
_A = flow_network
_A = flow_network.verticesCount
_A = flow_network.sourceIndex
_A = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms; use a deep copy before doing that
_A = flow_network.graph
_A = False
def a_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
if not self.executed:
self._algorithm()
_A = True
def a_ ( self : Any ) -> int:
'''simple docstring'''
pass
class snake_case ( _UpperCamelCase):
def __init__( self : Optional[Any] , a__ : Dict ) -> List[str]:
'''simple docstring'''
super().__init__(a__ )
# use this to save your result
_A = -1
def a_ ( self : Any ) -> List[str]:
'''simple docstring'''
if not self.executed:
raise Exception("You should execute algorithm before using its result!" )
return self.maximum_flow
class snake_case ( _UpperCamelCase):
def __init__( self : Union[str, Any] , a__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
super().__init__(a__ )
_A = [[0] * self.verticies_count for i in range(self.verticies_count )]
_A = [0] * self.verticies_count
_A = [0] * self.verticies_count
def a_ ( self : Any ) -> Dict:
'''simple docstring'''
_A = self.verticies_count
# push some substance into the graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_A = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
_A = 0
while i < len(a__ ):
_A = vertices_list[i]
_A = self.heights[vertex_index]
self.process_vertex(a__ )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(a__ ) )
_A = 0
else:
i += 1
_A = sum(self.preflow[self.source_index] )
def a_ ( self : Dict , a__ : Any ) -> Optional[int]:
'''simple docstring'''
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(a__ , a__ )
self.relabel(a__ )
def a_ ( self : str , a__ : Optional[int] , a__ : List[Any] ) -> Optional[int]:
'''simple docstring'''
_A = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def a_ ( self : Any , a__ : Dict ) -> Any:
'''simple docstring'''
_A = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_A = self.heights[to_index]
if min_height is not None:
_A = min_height + 1
if __name__ == "__main__":
a_ = [0]
a_ = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
a_ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
a_ = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
a_ = flow_network.find_maximum_flow()
print(f'''maximum flow is {maximum_flow}''') | 621 | 0 |
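Cross-check sketch for the 4-node network above using networkx (optional dependency; capacities match the graph literal):

import networkx as nx

g = nx.DiGraph()
g.add_edge(0, 1, capacity=7)
g.add_edge(1, 2, capacity=6)
g.add_edge(2, 3, capacity=8)
g.add_edge(3, 0, capacity=9)
flow_value, _ = nx.maximum_flow(g, 0, 3)
print(flow_value)  # 6: the path 0 -> 1 -> 2 -> 3 is bottlenecked by edge 1 -> 2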
"""simple docstring"""
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
a_ = "\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n"
a_ = "\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.\n"
a_ = r"\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting \"1/2\" to \"\\frac{1}{2}\")\n\nExamples:\n >>> metric = datasets.load_metric(\"competition_math\")\n >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])\n >>> print(results)\n {\'accuracy\': 1.0}\n"
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class snake_case ( datasets.Metric):
def a_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
def a_ ( self : Optional[int] , a__ : Any , a__ : Dict ) -> Tuple:
'''simple docstring'''
_A = 0.0
for i, j in zip(__UpperCamelCase , __UpperCamelCase ):
n_correct += 1.0 if math_equivalence.is_equiv(__UpperCamelCase , __UpperCamelCase ) else 0.0
_A = n_correct / len(__UpperCamelCase )
return {
"accuracy": accuracy,
} | 715 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 621 | 0 |
"""simple docstring"""
import numpy as np
def a__ ( __lowercase ) -> np.array:
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod() | 716 |
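The expression in the snippet is algebraically tanh(x), since 2 / (1 + e^(-2x)) - 1 = (1 - e^(-2x)) / (1 + e^(-2x)). A quick self-contained check:

import numpy as np

x = np.linspace(-3, 3, 7)
assert np.allclose((2 / (1 + np.exp(-2 * x))) - 1, np.tanh(x))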
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
a_ = logging.get_logger(__name__)
class snake_case ( _UpperCamelCase):
def __init__( self : str , *a__ : Dict , **a__ : Optional[int] ) -> None:
'''simple docstring'''
warnings.warn(
"The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use GLPNImageProcessor instead." , a__ , )
super().__init__(*a__ , **a__ ) | 621 | 0 |
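Self-contained sketch of the deprecation-shim pattern the class above follows (names illustrative):

import warnings

class NewProcessor:
    def __init__(self, scale=1.0):
        self.scale = scale

class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

OldFeatureExtractor()  # warns, then behaves exactly like NewProcessor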
"""simple docstring"""
from math import factorial, radians
def a__ ( __lowercase , __lowercase = 18 , __lowercase = 10 ) -> str:
_A = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
_A = radians(__lowercase )
_A = angle_in_radians
_A = 3
_A = -1
for _ in range(__lowercase ):
result += (b * (angle_in_radians**a)) / factorial(__lowercase )
_A = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(__lowercase , __lowercase )
if __name__ == "__main__":
__import__("doctest").testmod() | 717 |
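Self-contained check of the same truncated Maclaurin series against math.sin (term count illustrative):

from math import factorial, radians, sin

def taylor_sin(deg, terms=10):
    x = radians(deg % 360.0)
    return sum((-1) ** n * x ** (2 * n + 1) / factorial(2 * n + 1) for n in range(terms))

assert abs(taylor_sin(30) - sin(radians(30))) < 1e-9  # sin 30 deg = 0.5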
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def a__ ( __lowercase ) -> Optional[int]:
_A = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"_float_tensor",
"decoder.output_projection.weight",
]
for k in ignore_keys:
state_dict.pop(__lowercase , __lowercase )
def a__ ( __lowercase ) -> List[Any]:
_A , _A = emb.weight.shape
_A = nn.Linear(__lowercase , __lowercase , bias=__lowercase )
_A = emb.weight.data
return lin_layer
def a__ ( __lowercase , __lowercase="facebook/mbart-large-en-ro" , __lowercase=False , __lowercase=False ) -> List[str]:
_A = torch.load(__lowercase , map_location="cpu" )["model"]
remove_ignore_keys_(__lowercase )
_A = state_dict["encoder.embed_tokens.weight"].shape[0]
_A = MBartConfig.from_pretrained(__lowercase , vocab_size=__lowercase )
if mbart_aa and finetuned:
_A = "relu"
_A = state_dict["decoder.embed_tokens.weight"]
_A = MBartForConditionalGeneration(__lowercase )
model.model.load_state_dict(__lowercase )
if finetuned:
_A = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config",
default="facebook/mbart-large-cc25",
type=str,
help="Which huggingface architecture to use: mbart-large",
)
parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
a_ = parser.parse_args()
a_ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path) | 621 | 0 |
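The make_linear_from_emb helper above ties the LM head to the embedding matrix; a self-contained sketch of that weight sharing:

import torch
from torch import nn

emb = nn.Embedding(10, 4)
lm_head = nn.Linear(4, 10, bias=False)
lm_head.weight.data = emb.weight.data  # logits = hidden @ E^T, storage shared
assert lm_head.weight.data_ptr() == emb.weight.data_ptr()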
"""simple docstring"""
import argparse
import os
import re
import packaging.version
a_ = "examples/"
a_ = {
"examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
"doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
a_ = {
"init": "src/diffusers/__init__.py",
"setup": "setup.py",
}
a_ = "README.md"
def a__ ( __lowercase , __lowercase , __lowercase ) -> str:
with open(__lowercase , "r" , encoding="utf-8" , newline="\n" ) as f:
_A = f.read()
_A = REPLACE_PATTERNS[pattern]
_A = replace.replace("VERSION" , __lowercase )
_A = re_pattern.sub(__lowercase , __lowercase )
with open(__lowercase , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(__lowercase )
def a__ ( __lowercase ) -> Any:
for folder, directories, fnames in os.walk(__lowercase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(__lowercase , __lowercase ) , __lowercase , pattern="examples" )
def a__ ( __lowercase , __lowercase=False ) -> Tuple:
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__lowercase , __lowercase , __lowercase )
if not patch:
update_version_in_examples(__lowercase )
def a__ ( ) -> List[str]:
_A = '''🤗 Transformers currently provides the following architectures'''
_A = '''1. Want to contribute a new model?'''
with open(__lowercase , "r" , encoding="utf-8" , newline="\n" ) as f:
_A = f.readlines()
# Find the start of the list.
_A = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
_A = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
_A = lines[index].replace(
"https://huggingface.co/docs/diffusers/main/model_doc" , "https://huggingface.co/docs/diffusers/model_doc" , )
index += 1
with open(__lowercase , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(__lowercase )
def a__ ( ) -> Optional[Any]:
with open(REPLACE_FILES["init"] , "r" ) as f:
_A = f.read()
_A = REPLACE_PATTERNS['''init'''][0].search(__lowercase ).groups()[0]
return packaging.version.parse(__lowercase )
def a__ ( __lowercase=False ) -> Union[str, Any]:
_A = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can\'t create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
_A = default_version.base_version
elif patch:
_A = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
_A = f"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
_A = input(f"""Which version are you releasing? [{default_version}]""" )
if len(__lowercase ) == 0:
_A = default_version
print(f"""Updating version to {version}.""" )
global_version_update(__lowercase , patch=__lowercase )
def a__ ( ) -> int:
_A = get_version()
_A = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
_A = current_version.base_version
# Check with the user we got that right.
_A = input(f"""Which version are we developing now? [{dev_version}]""" )
if len(__lowercase ) == 0:
_A = dev_version
print(f"""Updating version to {version}.""" )
global_version_update(__lowercase )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
a_ = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work() | 718 |
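Self-contained demo of the regex replacement the script applies to __init__ files:

import re

pattern = re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE)
text = '__version__ = "0.17.0.dev0"\n'
print(pattern.sub('__version__ = "0.17.0"', text))  # __version__ = "0.17.0"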
"""simple docstring"""
import numpy as np
def a__ ( __lowercase , __lowercase ) -> np.ndarray:
return np.where(vector > 0 , __lowercase , (alpha * (np.exp(__lowercase ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 621 | 0 |
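In the mangled signature above both parameters map to the same placeholder name; the intended function is ELU, which in clean form reads:

import numpy as np

def elu(x, alpha=1.0):
    return np.where(x > 0, x, alpha * (np.exp(x) - 1))

print(elu(np.array([-1.0, 0.0, 2.0])))  # approx [-0.632, 0.0, 2.0]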
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve( class_prompt , class_data_dir , num_class_images ) -> Dict:
    factor = 1.5
    num_images = int(factor * num_class_images )
    client = ClipClient(
        url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=num_images , aesthetic_weight=0.1 )
    os.makedirs(f"""{class_data_dir}/images""" , exist_ok=True )
    if len(list(Path(f"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt )
        if len(class_images ) >= factor * num_class_images or num_images > 1E4:
            break
        else:
            num_images = int(factor * num_images )
            client = ClipClient(
                url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=num_images , aesthetic_weight=0.1 , )
    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images" , total=num_class_images )
    with open(f"""{class_data_dir}/caption.txt""" , "w" ) as fa, open(f"""{class_data_dir}/urls.txt""" , "w" ) as fb, open(
        f"""{class_data_dir}/images.txt""" , "w" ) as fc:
while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"] )
                if img.status_code == 200:
                    Image.open(BytesIO(img.content ) )  # sanity check that the payload decodes as an image
with open(f"""{class_data_dir}/images/{total}.jpg""" , "wb" ) as f:
f.write(img.content )
                    fa.write(images["caption"] + "\n" )
                    fb.write(images["url"] + "\n" )
                    fc.write(f"""{class_data_dir}/images/{total}.jpg""" + "\n" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def parse_args( ) -> List[str]:
    parser = argparse.ArgumentParser("" , add_help=False )
    parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=True , type=str )
    parser.add_argument("--class_data_dir" , help="path to save images" , required=True , type=str )
    parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=int )
    return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images) | 719 |
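The retrieval loop above skips all work when enough class images already exist on disk; a minimal standalone sketch of that resume check (helper name hypothetical):

from pathlib import Path

def already_complete(class_data_dir: str, num_class_images: int) -> bool:
    image_dir = Path(class_data_dir) / "images"
    # mirrors the early return above: enough files on disk means nothing to download
    return image_dir.exists() and len(list(image_dir.iterdir())) >= num_class_images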
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
}
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"t5-small": 5_12,
"t5-base": 5_12,
"t5-large": 5_12,
"t5-3b": 5_12,
"t5-11b": 5_12,
}
SPIECE_UNDERLINE = "▁"
class TaTokenizer ( PreTrainedTokenizer):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ['input_ids', 'attention_mask']
def __init__( self : List[str] , a__ : Optional[int] , a__ : Union[str, Any]="</s>" , a__ : Union[str, Any]="<unk>" , a__ : str="<pad>" , a__ : Optional[int]=1_00 , a__ : List[Any]=None , a__ : Optional[Dict[str, Any]] = None , a__ : Any=True , **a__ : Optional[int] , ) -> None:
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
_A = [F"""<extra_id_{i}>""" for i in range(a__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_A = len(set(filter(lambda a__ : bool("extra_id" in str(a__ ) ) , a__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
if legacy:
logger.warning_once(
F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"""
" read the related pull request available at https://github.com/huggingface/transformers/pull/24565" )
_A = legacy
_A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=a__ , unk_token=a__ , pad_token=a__ , extra_ids=a__ , additional_special_tokens=a__ , sp_model_kwargs=self.sp_model_kwargs , legacy=a__ , **a__ , )
_A = vocab_file
_A = extra_ids
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a__ )
@staticmethod
def a_ ( a__ : List[str] , a__ : Optional[int] , a__ : Tuple ) -> Tuple:
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
_A = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
F""" {pretrained_model_name_or_path} automatically truncating your input to"""
F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , a__ , )
return max_model_length
@property
def a_ ( self : List[Any] ) -> Dict:
'''simple docstring'''
return self.sp_model.get_piece_size() + self._extra_ids
def a_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
_A = {self.convert_ids_to_tokens(a__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a_ ( self : Optional[Any] , a__ : List[int] , a__ : Optional[List[int]] = None , a__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(a__ )) + [1]
return ([0] * len(a__ )) + [1] + ([0] * len(a__ )) + [1]
def a_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
return list(
set(filter(lambda a__ : bool(re.search(r"<extra_id_\d+>" , a__ ) ) is not None , self.additional_special_tokens ) ) )
def a_ ( self : str ) -> List[Any]:
'''simple docstring'''
return [self._convert_token_to_id(a__ ) for token in self.get_sentinel_tokens()]
def a_ ( self : List[Any] , a__ : List[int] ) -> List[int]:
'''simple docstring'''
if len(a__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
" eos tokens being added." )
return token_ids
else:
return token_ids + [self.eos_token_id]
def a_ ( self : int , a__ : List[int] , a__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_A = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def a_ ( self : Union[str, Any] , a__ : List[int] , a__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_A = self._add_eos_if_not_present(a__ )
if token_ids_a is None:
return token_ids_a
else:
_A = self._add_eos_if_not_present(a__ )
return token_ids_a + token_ids_a
def __getstate__( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
_A = self.__dict__.copy()
_A = None
return state
def __setstate__( self : int , a__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
_A = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_A = {}
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a_ ( self : int , a__ : "TextInput" , **a__ : List[str] ) -> List[str]:
'''simple docstring'''
if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE , " " )
return super().tokenize(a__ , **a__ )
def a_ ( self : str , a__ : Dict , **a__ : Optional[int] ) -> Any:
'''simple docstring'''
if not self.legacy:
_A = text.startswith(a__ )
if is_first:
_A = text[1:]
_A = self.sp_model.encode(a__ , out_type=a__ )
if not self.legacy and not is_first and not text.startswith(" " ) and tokens[0].startswith(a__ ):
_A = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def a_ ( self : int , a__ : List[Any] ) -> List[str]:
'''simple docstring'''
if token.startswith("<extra_id_" ):
_A = re.match(r"<extra_id_(\d+)>" , a__ )
_A = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(a__ )
def a_ ( self : Dict , a__ : Union[str, Any] ) -> Any:
'''simple docstring'''
if index < self.sp_model.get_piece_size():
_A = self.sp_model.IdToPiece(a__ )
else:
_A = F"""<extra_id_{self.vocab_size - 1 - index}>"""
return token
def a_ ( self : Optional[int] , a__ : Tuple ) -> List[str]:
'''simple docstring'''
_A = []
_A = ""
_A = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a__ ) + token
_A = True
_A = []
else:
current_sub_tokens.append(a__ )
_A = False
out_string += self.sp_model.decode(a__ )
return out_string.strip()
def a_ ( self : Dict , a__ : str , a__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(a__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_A = os.path.join(
a__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a__ )
elif not os.path.isfile(self.vocab_file ):
with open(a__ , "wb" ) as fi:
_A = self.sp_model.serialized_model_proto()
fi.write(a__ )
return (out_vocab_file,) | 621 | 0 |
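The tokenizer above parks sentinel tokens at the top of the vocabulary in reverse order; a standalone sketch of that arithmetic (the 32100 total is an assumption, matching T5's 32000 SentencePiece pieces plus 100 extra ids):

import re

def extra_id_to_index(token: str, vocab_size: int = 32100) -> int:
    # "<extra_id_N>" maps to vocab_size - N - 1, so <extra_id_0> gets the last id
    n = int(re.match(r"<extra_id_(\d+)>", token).group(1))
    return vocab_size - n - 1

assert extra_id_to_index("<extra_id_0>") == 32099
assert extra_id_to_index("<extra_id_99>") == 32000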
"""simple docstring"""
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class snake_case ( TestCase):
    def _create_example_records( self : int ) -> Optional[Any]:
'''simple docstring'''
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def a_ ( self : int ) -> Any:
'''simple docstring'''
_A = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
return Dataset.from_dict(a__ )
def a_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
_A = self._create_example_records()
_A = Dataset.from_list(a__ )
self.assertListEqual(dset.column_names , ["col_1", "col_2"] )
for i, r in enumerate(a__ ):
self.assertDictEqual(a__ , example_records[i] )
def a_ ( self : Optional[int] ) -> int:
'''simple docstring'''
_A = self._create_example_records()
_A = Dataset.from_list(a__ )
_A = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def a_ ( self : Optional[Any] ) -> Any: # checks what happens with missing columns
'''simple docstring'''
_A = [{"col_1": 1}, {"col_2": "x"}]
_A = Dataset.from_list(a__ )
self.assertDictEqual(dset[0] , {"col_1": 1} )
self.assertDictEqual(dset[1] , {"col_1": None} ) # NB: first record is used for columns
def a_ ( self : Union[str, Any] ) -> Any: # checks if the type can be inferred from the second record
'''simple docstring'''
_A = [{"col_1": []}, {"col_1": [1, 2]}]
_A = Dataset.from_list(a__ )
self.assertEqual(dset.info.features["col_1"] , Sequence(Value("int64" ) ) )
def a_ ( self : str ) -> Tuple:
'''simple docstring'''
_A = Dataset.from_list([] )
self.assertEqual(len(a__ ) , 0 )
self.assertListEqual(dset.column_names , [] ) | 720 |
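The missing-column behaviour asserted above can be reproduced directly; the first record fixes the schema and later records are padded with None:

from datasets import Dataset

dset = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
print(dset[0])  # {'col_1': 1}
print(dset[1])  # {'col_1': None} -- col_2 is dropped because record 0 set the columns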
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt( args ) -> List[Any]:
    parameter_file = os.path.join(args.tf_model_dir , "parameters.json" )
    params = json.loads(open(parameter_file ).read() )
if not params:
raise ValueError(
f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith(".pt" ):
_A = args.output + ".pt"
_A = OrderedDict()
with tf.device("/CPU:0" ):
_A = tf.train.load_checkpoint(args.tf_model_dir )
_A = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
_A = reader.get_tensor(__lowercase ).astype(np.floataa )
if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ):
continue
if key_name.startswith("pasts/" ):
if key_name.startswith("pasts/mlp" ):
_A = int(key_name[9] )
elif key_name.startswith("pasts/out" ):
_A = 8
                _A = "model.sqout.%d.weight" % (player * 2) # enters an nn.Sequential with Tanh, so 2 at a time
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/moe" ):
_A = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/switch_gating/kernel" ):
_A = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.endswith("/softmlp/kernel" ):
_A = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
_A = key_name[-9:-7]
for i in range(16 ):
_A = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
_A = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/mlp" ):
_A = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/p1/kernel" ):
_A = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.endswith("/p1/bias" ):
_A = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.endswith("/p2/kernel" ):
_A = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.endswith("/p2/bias" ):
_A = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/ln" ):
_A = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
_A = "model.blocks.%d.feed_forward.norm.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.endswith("/g" ):
_A = "model.blocks.%d.feed_forward.norm.weight" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/att" ):
_A = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/qkv/kernel" ):
_A = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_A = state[:, 0, :, :]
_A = state[:, 1, :, :]
_A = state[:, 2, :, :]
_A = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_A = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_A = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_A = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
_A = torch.tensor(__lowercase )
_A = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
_A = torch.tensor(__lowercase )
_A = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
_A = torch.tensor(__lowercase )
elif key_name.endswith("/o/kernel" ):
_A = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
_A = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/an" ):
_A = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
_A = "model.blocks.%d.self_attn.norm.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.endswith("/g" ):
_A = "model.blocks.%d.self_attn.norm.weight" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif (
key_name.startswith("model/wte" )
or key_name.startswith("model/wpe" )
or key_name.startswith("model/ete" )
):
_A = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
key_name[-3:]
]
_A = "model.%s.weight" % nlayer
_A = vnp.copy() # same in embedded
_A = torch.tensor(__lowercase )
if key_name.startswith("model/wte" ):
_A = "lm_head.weight"
_A = vnp.copy() # same in embedded
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/wob" ):
_A = "final_logits_bias"
_A = vnp.copy() # same in embedded
_A = state.reshape((1, -1) )
_A = torch.tensor(__lowercase )
elif key_name == "model/dense/kernel":
_A = "model.last_project.weight"
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name == "model/dense_1/bias":
_A = "model.last_project.bias"
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
torch.save(__lowercase , args.output )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args) | 621 | 0 |
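The fused qkv handling above slices a [dim, 3, heads, head_dim] kernel and flattens each slice into a Linear-style weight; a NumPy sketch of the same reshape/transpose with illustrative shapes:

import numpy as np

dim, heads, head_dim = 8, 2, 4
fused = np.random.randn(dim, 3, heads, head_dim).astype(np.float32)
state_q = fused[:, 0, :, :]  # (dim, heads, head_dim)
# flatten the head dims, then transpose to (out_features, in_features) as nn.Linear expects
weight_q = state_q.reshape(dim, heads * head_dim).transpose(1, 0).copy()
print(weight_q.shape)  # (8, 8)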
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers( src_layers , dest_layers , layers_to_copy ) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
    assert len(dest_layers ) == len(layers_to_copy ), f"""{len(dest_layers )} != {len(layers_to_copy )}"""
    dest_layers.load_state_dict(layers_to_copy.state_dict() )
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy( n_student , n_teacher ) -> Union[str, Any]:
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"""
                f""" {n_student}""" )
        return list(range(n_student ) )
def get_layers_to_supervise( n_student , n_teacher ) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""" )
    elif n_teacher == n_student:
        return list(range(n_teacher ) )
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers( teacher , save_path = "student" , e = None , d = None , copy_first_teacher_layers=False , e_layers_to_copy=None , d_layers_to_copy=None , **extra_config_kwargs , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
_A = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
assert (e is not None) or (d is not None), _msg
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
AutoTokenizer.from_pretrained(lowerCamelCase__ ).save_pretrained(lowerCamelCase__ ) # purely for convenience
_A = AutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase__ ).eval()
else:
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ), f"""teacher must be a model or string got type {type(lowerCamelCase__ )}"""
    init_kwargs = teacher.config.to_diff_dict()
try:
_A , _A = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
_A = teacher_e
if d is None:
_A = teacher_d
init_kwargs.update({"encoder_layers": e, "decoder_layers": d} )
except AttributeError: # T5
if hasattr(teacher.config , "num_encoder_layers" ):
_A , _A = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
_A , _A = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
_A = teacher_e
if d is None:
_A = teacher_d
if hasattr(teacher.config , "num_encoder_layers" ):
init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d} )
else:
init_kwargs.update({"num_layers": e, "num_decoder_layers": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs )
# Copy weights
    student_cfg = teacher.config_class(**init_kwargs )
    student = AutoModelForSeqaSeqLM.from_config(student_cfg )
    # Start by copying the full teacher state dict; this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict() , strict=False )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
_A , _A = list(range(lowerCamelCase__ ) ), list(range(lowerCamelCase__ ) )
logger.info(
f"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"""
f""" {save_path}""" )
student.save_pretrained(lowerCamelCase__ )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
_A = pick_layers_to_copy(lowerCamelCase__ , lowerCamelCase__ )
if d_layers_to_copy is None:
_A = pick_layers_to_copy(lowerCamelCase__ , lowerCamelCase__ )
try:
if hasattr(
lowerCamelCase__ , "prophetnet" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , lowerCamelCase__ )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , lowerCamelCase__ )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , lowerCamelCase__ )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , lowerCamelCase__ )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , lowerCamelCase__ )
copy_layers(teacher.decoder.block , student.decoder.block , lowerCamelCase__ )
logger.info(
f"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" )
_A = {
"teacher_type": teacher.config.model_type,
"copied_encoder_layers": e_layers_to_copy,
"copied_decoder_layers": d_layers_to_copy,
}
student.save_pretrained(lowerCamelCase__ )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers) | 721 |
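The hardcoded maps above pick well-spread teacher layers, keeping the first and last where possible, and unknown pairs fall back to the first n_student layers; a couple of direct checks:

assert LAYERS_TO_COPY[12][3] == [0, 6, 11]
assert LAYERS_TO_COPY[16][4] == [0, 5, 10, 15]
# 5 is not a key under the 12-layer teacher, so the KeyError fallback applies (with a warning):
assert pick_layers_to_copy(n_student=5, n_teacher=12) == [0, 1, 2, 3, 4]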
"""simple docstring"""
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
a_ = argparse.ArgumentParser(
description=(
"Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
" Distillation"
)
)
parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
parser.add_argument("--model_name", default="roberta-large", type=str)
parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()
    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPTaLMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"
    state_dict = model.state_dict()
    compressed_sd = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
a_ = state_dict[f'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
a_ = f'''{prefix}.embeddings.{w}.weight'''
a_ = state_dict[param_name]
for w in ["weight", "bias"]:
a_ = f'''{prefix}.embeddings.LayerNorm.{w}'''
a_ = state_dict[param_name]
# Transformer Blocks #
    std_idx = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
a_ = state_dict[
f'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
]
a_ = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
a_ = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
]
std_idx += 1
    # Language Modeling Head ###
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
a_ = state_dict[f'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
a_ = state_dict[f'''lm_head.dense.{w}''']
a_ = state_dict[f'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
a_ = state_dict[f'''{prefix}.ln_f.{w}''']
a_ = state_dict["lm_head.weight"]
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint) | 621 | 0 |
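The distillation loop above walks teacher layers [0, 2, 4, 7, 9, 11] and renumbers them consecutively in the student, so six layers survive:

for std_idx, teacher_idx in enumerate([0, 2, 4, 7, 9, 11]):
    print(f"teacher layer {teacher_idx} -> student layer {std_idx}")  # 0->0, 2->1, ..., 11->5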
"""simple docstring"""
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class snake_case ( unittest.TestCase):
    def get_file_format( self , seed : int , shape : tuple ) -> str:
        '''simple docstring'''
        return F"""gaussian_noise_s={seed}_shape={'_'.join([str(s ) for s in shape] )}.npy"""
def a_ ( self : Tuple ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
    def get_latents( self , seed : int=0 , shape : tuple=(4, 4, 64, 64) , fpaa : bool=False ) -> List[Any]:
        '''simple docstring'''
        dtype = jnp.bfloataa if fpaa else jnp.floataa
        image = jnp.array(load_hf_numpy(self.get_file_format(seed , shape ) ) , dtype=dtype )
        return image
    def get_unet_model( self , fpaa : bool=False , model_id : str="CompVis/stable-diffusion-v1-4" ) -> tuple:
        '''simple docstring'''
        dtype = jnp.bfloataa if fpaa else jnp.floataa
        revision = "bf16" if fpaa else None
        model , params = FlaxUNetaDConditionModel.from_pretrained(
            model_id , subfolder="unet" , dtype=dtype , revision=revision )
        return model, params
    def get_encoder_hidden_states( self , seed : int=0 , shape : tuple=(4, 77, 7_68) , fpaa : bool=False ) -> Any:
        '''simple docstring'''
        dtype = jnp.bfloataa if fpaa else jnp.floataa
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed , shape ) ) , dtype=dtype )
        return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]],
[17, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]],
[8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]],
[3, 10_00, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]],
# fmt: on
] )
def a_ ( self : Optional[Any] , a__ : Tuple , a__ : List[str] , a__ : List[str] ) -> Optional[int]:
'''simple docstring'''
_A , _A = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4" , fpaa=a__ )
_A = self.get_latents(a__ , fpaa=a__ )
_A = self.get_encoder_hidden_states(a__ , fpaa=a__ )
_A = model.apply(
{"params": params} , a__ , jnp.array(a__ , dtype=jnp.intaa ) , encoder_hidden_states=a__ , ).sample
assert sample.shape == latents.shape
_A = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_A = jnp.array(a__ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
assert jnp.allclose(a__ , a__ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]],
[17, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]],
[8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]],
[3, 10_00, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]],
# fmt: on
] )
def a_ ( self : Optional[int] , a__ : List[Any] , a__ : Tuple , a__ : List[Any] ) -> str:
'''simple docstring'''
_A , _A = self.get_unet_model(model_id="stabilityai/stable-diffusion-2" , fpaa=a__ )
_A = self.get_latents(a__ , shape=(4, 4, 96, 96) , fpaa=a__ )
_A = self.get_encoder_hidden_states(a__ , shape=(4, 77, 10_24) , fpaa=a__ )
_A = model.apply(
{"params": params} , a__ , jnp.array(a__ , dtype=jnp.intaa ) , encoder_hidden_states=a__ , ).sample
assert sample.shape == latents.shape
_A = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_A = jnp.array(a__ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(a__ , a__ , atol=1E-2 ) | 700 |
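The fixture names the tests load via load_hf_numpy come straight from get_file_format; for example:

seed, shape = 17, (4, 4, 64, 64)
print(f"gaussian_noise_s={seed}_shape={'_'.join(str(s) for s in shape)}.npy")
# -> gaussian_noise_s=17_shape=4_4_64_64.npy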
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 621 | 0 |
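With the lazy module in place, consumer code looks unchanged; assuming this is the usual transformers sub-package layout, the torch-dependent module is only imported when the attribute is first resolved:

# Resolved through _LazyModule; the heavy modeling import happens on attribute access.
from transformers import UperNetForSemanticSegmentation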
"""simple docstring"""
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."
def write_basic_config( mixed_precision="no" , save_location = default_json_config_file , use_xpu = False ) -> Any:
    path = Path(save_location )
    path.parent.mkdir(parents=True , exist_ok=True )
if path.exists():
print(
f"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
    mixed_precision = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
f"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
    config = {
"compute_environment": "LOCAL_MACHINE",
"mixed_precision": mixed_precision,
}
if torch.cuda.is_available():
_A = torch.cuda.device_count()
_A = num_gpus
_A = False
if num_gpus > 1:
_A = "MULTI_GPU"
else:
_A = "NO"
elif is_xpu_available() and use_xpu:
_A = torch.xpu.device_count()
_A = num_xpus
_A = False
if num_xpus > 1:
_A = "MULTI_XPU"
else:
_A = "NO"
elif is_npu_available():
_A = torch.npu.device_count()
_A = num_npus
_A = False
if num_npus > 1:
_A = "MULTI_NPU"
else:
_A = "NO"
else:
_A = 0
_A = True
_A = 1
_A = "NO"
    config = ClusterConfig(**config )
    config.to_json_file(path )
return path
def default_command_parser( parser , parents ) -> Dict:
    parser = parser.add_parser("default" , parents=parents , help=description , formatter_class=SubcommandHelpFormatter )
parser.add_argument(
"--config_file" , default=__lowercase , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , dest="save_location" , )
parser.add_argument(
"--mixed_precision" , choices=["no", "fp16", "bf16"] , type=__lowercase , help="Whether or not to use mixed precision training. "
"Choose between FP16 and BF16 (bfloat16) training. "
"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later." , default="no" , )
    parser.set_defaults(func=default_config_command )
return parser
def default_config_command( args ) -> Any:
    config_file = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(f"""accelerate configuration saved at {config_file}""" )
| 701 |
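For orientation, the JSON the helper writes on a two-GPU machine would carry roughly these fields (key names follow accelerate's ClusterConfig and are an assumption here):

# {
#   "compute_environment": "LOCAL_MACHINE",
#   "mixed_precision": "no",
#   "num_processes": 2,
#   "distributed_type": "MULTI_GPU",
#   "use_cpu": false
# }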
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset ( IterableDataset):
    def __init__( self , p_stop : float=0.0_1 , max_length : int=10_00 ) -> None:
        '''simple docstring'''
        self.p_stop = p_stop
        self.max_length = max_length
    def __iter__( self ) -> Optional[Any]:
        '''simple docstring'''
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class snake_case ( unittest.TestCase):
def a_ ( self : List[Any] , a__ : Union[str, Any] , a__ : Union[str, Any] , a__ : List[str]=False , a__ : str=True ) -> Union[str, Any]:
'''simple docstring'''
_A = [
BatchSamplerShard(a__ , 2 , a__ , split_batches=a__ , even_batches=a__ )
for i in range(2 )
]
_A = [list(a__ ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(a__ ) for shard in batch_sampler_shards] , [len(a__ ) for e in expected] )
self.assertListEqual(a__ , a__ )
def a_ ( self : List[Any] ) -> str:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
        # Check the shards when the dataset is not a round multiple of batch size and does not contain a
        # multiple of num_processes batches.
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ )
def a_ ( self : int ) -> int:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size.
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
def a_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
        # Check the shards when the dataset is not a round multiple of batch size and does not contain a
        # multiple of num_processes batches.
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[[0, 1]], []]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
def a_ ( self : List[str] ) -> str:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size.
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[[0, 1]], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
def a_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
_A = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
_A = [BatchSamplerShard(a__ , 2 , a__ , even_batches=a__ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def a_ ( self : Optional[int] , a__ : Optional[int] , a__ : Tuple , a__ : Optional[int] , a__ : Union[str, Any]=False , a__ : int=2 , a__ : List[Any]=False ) -> str:
'''simple docstring'''
random.seed(a__ )
_A = list(a__ )
_A = [
IterableDatasetShard(
a__ , batch_size=a__ , drop_last=a__ , num_processes=a__ , process_index=a__ , split_batches=a__ , )
for i in range(a__ )
]
_A = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(a__ )
iterable_dataset_lists.append(list(a__ ) )
_A = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
_A = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(a__ ) , len(a__ ) )
self.assertTrue(len(a__ ) % shard_batch_size == 0 )
_A = []
for idx in range(0 , len(a__ ) , a__ ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(a__ ) < len(a__ ):
reference += reference
self.assertListEqual(a__ , reference[: len(a__ )] )
def a_ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
_A = 42
_A = RandomIterableDataset()
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
# Edge case with a very small dataset
_A = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
def a_ ( self : List[str] ) -> Dict:
'''simple docstring'''
_A = BatchSampler(range(16 ) , batch_size=4 , drop_last=a__ )
_A = SkipBatchSampler(a__ , 2 )
self.assertListEqual(list(a__ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a_ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_A = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a_ ( self : int ) -> Optional[int]:
'''simple docstring'''
_A = DataLoader(list(range(16 ) ) , batch_size=4 )
_A = skip_first_batches(a__ , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a_ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_A = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def a_ ( self : int ) -> int:
'''simple docstring'''
Accelerator()
_A = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) | 621 | 0 |
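The skip helpers tested above all agree on the result: skipping two batches of size 4 over range(16) leaves the last two batches. A direct reproduction:

from torch.utils.data import DataLoader
from accelerate.data_loader import skip_first_batches

loader = DataLoader(list(range(16)), batch_size=4)
for batch in skip_first_batches(loader, num_batches=2):
    print(batch.tolist())  # [8, 9, 10, 11] then [12, 13, 14, 15]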
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class snake_case ( _UpperCamelCase):
def __init__( self : Optional[int] , a__ : str=0.0_1 , a__ : str=10_00 ) -> int:
'''simple docstring'''
_A = p_stop
_A = max_length
def __iter__( self : Any ) -> Optional[Any]:
'''simple docstring'''
_A = 0
_A = False
while not stop and count < self.max_length:
yield count
count += 1
_A = random.random() < self.p_stop
class snake_case ( unittest.TestCase):
def a_ ( self : List[Any] , a__ : Union[str, Any] , a__ : Union[str, Any] , a__ : List[str]=False , a__ : str=True ) -> Union[str, Any]:
'''simple docstring'''
_A = [
BatchSamplerShard(a__ , 2 , a__ , split_batches=a__ , even_batches=a__ )
for i in range(2 )
]
_A = [list(a__ ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(a__ ) for shard in batch_sampler_shards] , [len(a__ ) for e in expected] )
self.assertListEqual(a__ , a__ )
def a_ ( self : List[Any] ) -> str:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ )
def a_ ( self : int ) -> int:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size.
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
def a_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[[0, 1]], []]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
def a_ ( self : List[str] ) -> str:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size.
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[[0, 1]], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
def a_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
_A = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
_A = [BatchSamplerShard(a__ , 2 , a__ , even_batches=a__ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def a_ ( self : Optional[int] , a__ : Optional[int] , a__ : Tuple , a__ : Optional[int] , a__ : Union[str, Any]=False , a__ : int=2 , a__ : List[Any]=False ) -> str:
'''simple docstring'''
random.seed(a__ )
_A = list(a__ )
_A = [
IterableDatasetShard(
a__ , batch_size=a__ , drop_last=a__ , num_processes=a__ , process_index=a__ , split_batches=a__ , )
for i in range(a__ )
]
_A = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(a__ )
iterable_dataset_lists.append(list(a__ ) )
_A = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
_A = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(a__ ) , len(a__ ) )
self.assertTrue(len(a__ ) % shard_batch_size == 0 )
_A = []
for idx in range(0 , len(a__ ) , a__ ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(a__ ) < len(a__ ):
reference += reference
self.assertListEqual(a__ , reference[: len(a__ )] )
def a_ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
_A = 42
_A = RandomIterableDataset()
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
# Edge case with a very small dataset
_A = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
def a_ ( self : List[str] ) -> Dict:
'''simple docstring'''
_A = BatchSampler(range(16 ) , batch_size=4 , drop_last=a__ )
_A = SkipBatchSampler(a__ , 2 )
self.assertListEqual(list(a__ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a_ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_A = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a_ ( self : int ) -> Optional[int]:
'''simple docstring'''
_A = DataLoader(list(range(16 ) ) , batch_size=4 )
_A = skip_first_batches(a__ , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a_ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_A = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def a_ ( self : int ) -> int:
'''simple docstring'''
Accelerator()
_A = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) | 702 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
a_ = False
class snake_case ( unittest.TestCase):
pass
@nightly
@require_torch_gpu
class snake_case ( unittest.TestCase):
def a_ ( self : Optional[int] ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : Tuple ) -> Any:
'''simple docstring'''
_A = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
_A = torch.manual_seed(0 )
_A = pipe.dual_guided(
prompt="first prompt" , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a__ )
_A = VersatileDiffusionPipeline.from_pretrained(a__ , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_A = generator.manual_seed(0 )
_A = pipe.dual_guided(
prompt="first prompt" , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def a_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
_A = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_A = "cyberpunk 2077"
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
_A = torch.manual_seed(0 )
_A = pipe.dual_guided(
prompt=a__ , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
_A = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_A = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_A = "A painting of a squirrel eating a burger "
_A = torch.manual_seed(0 )
_A = pipe.text_to_image(
prompt=a__ , generator=a__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
_A = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_A = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_A = pipe.image_variation(a__ , generator=a__ , output_type="numpy" ).images
_A = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_A = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 | 621 | 0 |
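# Hypothetical standalone sketch of the dual-guided call exercised by the test
# above (a CUDA device and the public checkpoint are assumed; the prompt is
# illustrative and not part of the original file):
#   import torch
#   from diffusers import VersatileDiffusionPipeline
#   from diffusers.utils import load_image
#   pipe = VersatileDiffusionPipeline.from_pretrained(
#       "shi-labs/versatile-diffusion", torch_dtype=torch.float16
#   ).to("cuda")
#   image = load_image("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
#   out = pipe.dual_guided(
#       prompt="a red car", image=image, text_to_image_strength=0.75,
#       generator=torch.manual_seed(0), guidance_scale=7.5, num_inference_steps=25,
#   ).images[0]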
"""simple docstring"""
def a__ ( __lowercase ) -> str:
for i in range(0 , __lowercase ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(" " , end="" )
for _ in range(0 , i + 1 ): # printing stars
print("* " , end="" )
print()
def a__ ( __lowercase ) -> Optional[int]:
for i in range(__lowercase , 0 , -1 ):
for _ in range(__lowercase , 0 , -1 ): # printing stars
print("* " , end="" )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(" " , end="" )
def a__ ( __lowercase ) -> Any:
if n <= 0:
print(" ... .... nothing printing :(" )
return
floyd(__lowercase ) # upper half
reverse_floyd(__lowercase ) # lower half
if __name__ == "__main__":
print(r"| /\ | |- | |- |--| |\ /| |-")
print(r"|/ \| |- |_ |_ |__| | \/ | |_")
a_ = 1
while K:
a_ = int(input("enter the number and , and see the magic : "))
print()
pretty_print(user_number)
a_ = int(input("press 0 to exit... and 1 to continue..."))
print("Good Bye...") | 703 |
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
a_ = logging.get_logger(__name__)
@dataclass
class snake_case :
__UpperCamelCase = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys())})
__UpperCamelCase = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'})
__UpperCamelCase = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__UpperCamelCase = field(
default=_UpperCamelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'})
def a_ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
_A = self.task_name.lower()
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'train'
__UpperCamelCase = 'dev'
__UpperCamelCase = 'test'
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
def __init__( self : Optional[int] , a__ : GlueDataTrainingArguments , a__ : PreTrainedTokenizerBase , a__ : Optional[int] = None , a__ : Union[str, Split] = Split.train , a__ : Optional[str] = None , ) -> Tuple:
'''simple docstring'''
warnings.warn(
"This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
"library. You can have a look at this example script for pointers: "
"https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" , a__ , )
_A = args
_A = glue_processors[args.task_name]()
_A = glue_output_modes[args.task_name]
if isinstance(a__ , a__ ):
try:
_A = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
# Load data features from cache or dataset file
_A = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
_A = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_A , _A = label_list[2], label_list[1]
_A = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_A = cached_features_file + ".lock"
with FileLock(a__ ):
if os.path.exists(a__ ) and not args.overwrite_cache:
_A = time.time()
_A = torch.load(a__ )
logger.info(
F"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
else:
logger.info(F"""Creating features from dataset file at {args.data_dir}""" )
if mode == Split.dev:
_A = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
_A = self.processor.get_test_examples(args.data_dir )
else:
_A = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
_A = examples[:limit_length]
_A = glue_convert_examples_to_features(
a__ , a__ , max_length=args.max_seq_length , label_list=a__ , output_mode=self.output_mode , )
_A = time.time()
torch.save(self.features , a__ )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self : List[Any] ) -> Any:
'''simple docstring'''
return len(self.features )
def __getitem__( self : Tuple , a__ : Union[str, Any] ) -> InputFeatures:
'''simple docstring'''
return self.features[i]
def a_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
return self.label_list | 621 | 0 |
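# Hypothetical usage sketch (not part of the original module); the data_dir is
# a placeholder and assumes GLUE MRPC tsv files on disk:
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
#   train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode=Split.train)
#   print(len(train_dataset), train_dataset.get_labels())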
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0x4_e00 and cp <= 0x9_fff)
or (cp >= 0x3_400 and cp <= 0x4_dbf) #
or (cp >= 0x20_000 and cp <= 0x2a_6df) #
or (cp >= 0x2a_700 and cp <= 0x2b_73f) #
or (cp >= 0x2b_740 and cp <= 0x2b_81f) #
or (cp >= 0x2b_820 and cp <= 0x2c_eaf) #
or (cp >= 0xf_900 and cp <= 0xf_aff)
or (cp >= 0x2f_800 and cp <= 0x2f_a1f) #
): #
return True
return False
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
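# Illustration with toy data: words found in the LTP segmentation get the
# whole-word "##" marker on their non-initial BERT sub-tokens.
assert add_sub_symbol(["北", "京", "是", "中", "国"], {"北京", "中国"}) == ["北", "##京", "是", "中", "##国"]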
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
    args = parser.parse_args()
main(args) | 704 |
"""simple docstring"""
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase ) -> str:
# Return True if there is node that has not iterated.
_A = [False] * len(__lowercase )
_A = []
queue.append(__lowercase )
_A = True
while queue:
_A = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(__lowercase )
_A = True
_A = u
return visited[t]
def a__ ( __lowercase , __lowercase , __lowercase ) -> int:
# This array is filled by BFS and to store path
_A = [-1] * (len(__lowercase ))
_A = 0
while bfs(__lowercase , __lowercase , __lowercase , __lowercase ):
_A = float("Inf" )
_A = sink
while s != source:
# Find the minimum value in select path
_A = min(__lowercase , graph[parent[s]][s] )
_A = parent[s]
max_flow += path_flow
_A = sink
while v != source:
_A = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
_A = parent[v]
return max_flow
a_ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
a_ , a_ = 0, 5
print(ford_fulkerson(graph, source, sink)) | 621 | 0 |
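# A second, hand-checkable instance: two disjoint unit-capacity paths from
# node 0 to node 3, so the maximum flow must be 2.
_small = [
    [0, 1, 1, 0],
    [0, 0, 0, 1],
    [0, 0, 0, 1],
    [0, 0, 0, 0],
]
assert ford_fulkerson(_small, 0, 3) == 2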
"""simple docstring"""
import math
import sys
def a__ ( __lowercase ) -> int:
if number != int(__lowercase ):
raise ValueError("the value of input must be a natural number" )
if number < 0:
raise ValueError("the value of input must not be a negative number" )
if number == 0:
return 1
_A = [-1] * (number + 1)
_A = 0
for i in range(1 , number + 1 ):
_A = sys.maxsize
_A = int(math.sqrt(__lowercase ) )
for j in range(1 , root + 1 ):
_A = 1 + answers[i - (j**2)]
_A = min(__lowercase , __lowercase )
_A = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
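    # Spot checks: 12 = 4 + 4 + 4, 25 = 5**2, and 7 needs four squares
    # (4 + 1 + 1 + 1), the worst case allowed by Lagrange's four-square theorem.
    assert minimum_squares_to_represent_a_number(12) == 3
    assert minimum_squares_to_represent_a_number(25) == 1
    assert minimum_squares_to_represent_a_number(7) == 4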
| 705 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
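# Quick sanity check (illustrative only): rename_key pops the old entry and
# re-inserts its value under the new key.
_toy = {"transformer.decoder.norm.weight": 1}
rename_key(_toy, "transformer.decoder.norm.weight", "decoder.layernorm.weight")
assert _toy == {"decoder.layernorm.weight": 1}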
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
_A = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
_A = "resnet101"
if "dc5" in model_name:
_A = True
_A = "panoptic" in model_name
if is_panoptic:
_A = 250
else:
_A = 91
_A = "huggingface/label-files"
_A = "coco-detection-id2label.json"
_A = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) )
_A = {int(__lowercase ): v for k, v in idalabel.items()}
_A = idalabel
_A = {v: k for k, v in idalabel.items()}
# load image processor
_A = "coco_panoptic" if is_panoptic else "coco_detection"
_A = ConditionalDetrImageProcessor(format=__lowercase )
# prepare image
_A = prepare_img()
_A = image_processor(images=__lowercase , return_tensors="pt" )
_A = encoding["pixel_values"]
logger.info(f"""Converting model {model_name}...""" )
# load original model from torch hub
_A = torch.hub.load("DeppMeng/ConditionalDETR" , __lowercase , pretrained=__lowercase ).eval()
_A = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
_A = "conditional_detr." + src
rename_key(__lowercase , __lowercase , __lowercase )
_A = rename_backbone_keys(__lowercase )
# query, key and value matrices need special treatment
read_in_q_k_v(__lowercase , is_panoptic=__lowercase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_A = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
_A = state_dict.pop(__lowercase )
_A = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_A = state_dict.pop(__lowercase )
_A = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
_A = state_dict.pop(__lowercase )
_A = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
_A = state_dict.pop(__lowercase )
_A = val
# finally, create HuggingFace model and load state dict
_A = ConditionalDetrForSegmentation(__lowercase ) if is_panoptic else ConditionalDetrForObjectDetection(__lowercase )
model.load_state_dict(__lowercase )
model.eval()
model.push_to_hub(repo_id=__lowercase , organization="DepuMeng" , commit_message="Add model" )
# verify our conversion
_A = conditional_detr(__lowercase )
_A = model(__lowercase )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(__lowercase ).mkdir(exist_ok=__lowercase )
model.save_pretrained(__lowercase )
image_processor.save_pretrained(__lowercase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path) | 621 | 0 |
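# Hypothetical command-line invocation of the converter above (the script
# name and dump folder are placeholders; weights are fetched from torch hub):
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50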
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)
a_ = "pytorch_model.bin"
@dataclasses.dataclass
class snake_case :
__UpperCamelCase = dataclasses.field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'})
__UpperCamelCase = dataclasses.field(
default=_UpperCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} , )
@dataclasses.dataclass
class snake_case :
__UpperCamelCase = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'})
__UpperCamelCase = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'})
__UpperCamelCase = dataclasses.field(
default=_UpperCamelCase , metadata={'help': 'A csv or a json file containing the validation data.'})
__UpperCamelCase = dataclasses.field(
default=_UpperCamelCase , metadata={'help': 'The name of the task to train on.'} , )
__UpperCamelCase = dataclasses.field(
default=_UpperCamelCase , metadata={'help': 'The list of labels for the task.'})
@dataclasses.dataclass
class snake_case :
__UpperCamelCase = dataclasses.field(
metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'})
__UpperCamelCase = dataclasses.field(
default='accuracy' , metadata={'help': 'The evaluation metric used for the task.'})
__UpperCamelCase = dataclasses.field(
default='no' , metadata={
'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
} , )
__UpperCamelCase = dataclasses.field(
default=10 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
__UpperCamelCase = dataclasses.field(
default=0.0 , metadata={
'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.'
} , )
__UpperCamelCase = dataclasses.field(
default=_UpperCamelCase , metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} , )
__UpperCamelCase = dataclasses.field(
default=_UpperCamelCase , metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} , )
__UpperCamelCase = dataclasses.field(
default=_UpperCamelCase , metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} , )
__UpperCamelCase = dataclasses.field(
default=0.0 , metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} , )
__UpperCamelCase = dataclasses.field(
default=100 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
__UpperCamelCase = dataclasses.field(
default=_UpperCamelCase , metadata={'help': 'Random seed for initialization.'} , )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
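# Illustrative check of the confidence filter used above, on a toy in-memory
# dataset (column values are made up):
#   toy = datasets.Dataset.from_dict({"prediction": [0, 1], "label": [0, 1], "probability": [0.4, 0.9]})
#   toy.filter(lambda ex: ex["probability"] > 0.5).num_rows  # -> 1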
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase , **__lowercase ) -> List[Any]:
_A = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
_A = STModelArguments(model_name_or_path=__lowercase )
_A = STDataArguments(train_file=__lowercase , infer_file=__lowercase )
_A = STTrainingArguments(output_dir=__lowercase )
_A = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(__lowercase ).items():
setattr(__lowercase , __lowercase , __lowercase )
for key, value in kwargs.items():
if hasattr(__lowercase , __lowercase ):
setattr(__lowercase , __lowercase , __lowercase )
# Sanity checks
_A = {}
_A = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
_A = args.train_file
_A = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
_A = args.eval_file
for key in data_files:
_A = data_files[key].split("." )[-1]
assert extension in ["csv", "json"], f"""`{key}_file` should be a csv or a json file."""
if args.data_file_extension is None:
_A = extension
else:
assert extension == args.data_file_extension, f"""`{key}_file` should be a {args.data_file_extension} file`."""
assert (
args.eval_metric in datasets.list_metrics()
), f"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("Creating the initial data directory for self-training..." )
_A = f"""{args.output_dir}/self-train_iter-{{}}""".format
_A = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=__lowercase )
os.makedirs(__lowercase , exist_ok=__lowercase )
accelerator.wait_for_everyone()
_A = None
_A = None
_A = 0
_A = False
# Show the progress bar
_A = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
_A = data_dir_format(__lowercase )
assert os.path.exists(__lowercase )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
_A = os.path.join(__lowercase , "stage-1" )
_A = {
"accelerator": accelerator,
"model_name_or_path": args.model_name_or_path,
"cache_dir": args.cache_dir,
"do_train": True,
"train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
"do_eval": True if args.eval_file is not None else False,
"eval_file": data_files["eval"],
"do_predict": True,
"infer_file": data_files["infer"],
"task_name": args.task_name,
"label_list": args.label_list,
"output_dir": current_output_dir,
"eval_metric": args.eval_metric,
"evaluation_strategy": args.evaluation_strategy,
"early_stopping_patience": args.early_stopping_patience,
"early_stopping_threshold": args.early_stopping_threshold,
"seed": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(__lowercase , __lowercase ):
arguments_dict.update({key: value} )
_A = os.path.join(__lowercase , "best-checkpoint" , __lowercase )
if os.path.exists(__lowercase ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1." , __lowercase , __lowercase , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 1 *****" , __lowercase )
finetune(**__lowercase )
accelerator.wait_for_everyone()
assert os.path.exists(__lowercase )
logger.info("Self-training job completed: iteration: %d, stage: 1." , __lowercase )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
_A = os.path.join(__lowercase , "best-checkpoint" )
_A = os.path.join(__lowercase , "stage-2" )
# Update arguments_dict
_A = model_path
_A = data_files["train"]
_A = current_output_dir
_A = os.path.join(__lowercase , "best-checkpoint" , __lowercase )
if os.path.exists(__lowercase ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2." , __lowercase , __lowercase , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 2 *****" , __lowercase )
finetune(**__lowercase )
accelerator.wait_for_everyone()
assert os.path.exists(__lowercase )
logger.info("Self-training job completed: iteration: %d, stage: 2." , __lowercase )
_A = iteration
_A = data_dir_format(iteration + 1 )
_A = AutoConfig.from_pretrained(os.path.join(__lowercase , "best-checkpoint" ) )
_A = config.idalabel
_A = os.path.join(__lowercase , "eval_results_best-checkpoint.json" )
_A = os.path.join(__lowercase , "test_results_best-checkpoint.json" )
assert os.path.exists(__lowercase )
with open(__lowercase , "r" ) as f:
_A = float(json.load(__lowercase )[args.eval_metric] )
_A = os.path.join(__lowercase , "infer_output_best-checkpoint.csv" )
assert os.path.exists(__lowercase )
# Loading the dataset from local csv or json files.
_A = load_dataset(args.data_file_extension , data_files={"data": data_files["infer"]} )["data"]
_A = load_dataset("csv" , data_files={"data": infer_output_file} )["data"]
if accelerator.is_main_process:
os.makedirs(__lowercase , exist_ok=__lowercase )
shutil.copy(__lowercase , os.path.join(__lowercase , f"""eval_results_iter-{iteration}.json""" ) )
if os.path.exists(__lowercase ):
shutil.copy(__lowercase , os.path.join(__lowercase , f"""test_results_iter-{iteration}.json""" ) )
create_pseudo_labeled_data(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
accelerator.wait_for_everyone()
_A = os.path.join(__lowercase , f"""train_pseudo.{args.data_file_extension}""" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
_A = eval_result
if best_iteration is None:
_A = new_iteration
_A = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
_A = new_iteration
_A = new_eval_result
_A = 0
else:
if new_eval_result == best_eval_result:
_A = new_iteration
_A = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
_A = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("Best iteration: %d" , __lowercase )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , __lowercase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__lowercase , f"""eval_results_iter-{iteration}.json""" ) , os.path.join(__lowercase , "eval_results_best-iteration.json" ) , )
else:
# Assume that the last iteration is the best
logger.info("Best iteration: %d" , args.max_selftrain_iterations - 1 )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , __lowercase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__lowercase , f"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) , os.path.join(__lowercase , "eval_results_best-iteration.json" ) , )
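    # Hypothetical driver call (illustrative only; assuming the self-training
    # entry point is exposed as `selftrain`, the name used in the upstream
    # research project, and that all paths are placeholders):
    #   selftrain(
    #       model_name_or_path="bert-base-uncased",
    #       train_file="data/train.csv",
    #       infer_file="data/infer.csv",
    #       output_dir="outputs",
    #   )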
| 706 |
"""simple docstring"""
import random
def a__ ( __lowercase , __lowercase , __lowercase ) -> Optional[Any]:
_A = a[left_index]
_A = left_index + 1
for j in range(left_index + 1 , __lowercase ):
if a[j] < pivot:
_A , _A = a[i], a[j]
i += 1
_A , _A = a[i - 1], a[left_index]
return i - 1
def a__ ( __lowercase , __lowercase , __lowercase ) -> int:
if left < right:
_A = random.randint(__lowercase , right - 1 )
_A , _A = (
a[left],
a[pivot],
) # switches the pivot with the left most bound
_A = partition(__lowercase , __lowercase , __lowercase )
quick_sort_random(
__lowercase , __lowercase , __lowercase ) # recursive quicksort to the left of the pivot point
quick_sort_random(
__lowercase , pivot_index + 1 , __lowercase ) # recursive quicksort to the right of the pivot point
def a__ ( ) -> Dict:
_A = input("Enter numbers separated by a comma:\n" ).strip()
_A = [int(__lowercase ) for item in user_input.split("," )]
quick_sort_random(__lowercase , 0 , len(__lowercase ) )
print(__lowercase )
if __name__ == "__main__":
main() | 621 | 0 |
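# Spot check for the randomized quicksort above (the sorted result is
# deterministic even though the pivot choice is random).
_data = [5, 3, 8, 1, 9, 2]
quick_sort_random(_data, 0, len(_data))
assert _data == [1, 2, 3, 5, 8, 9]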
"""simple docstring"""
import math
def a__ ( __lowercase , __lowercase ) -> float:
if initial_intensity < 0:
raise ValueError("The value of intensity cannot be negative" )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError("In Malus Law, the angle is in the range 0-360 degrees" )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(__lowercase ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="malus_law") | 707 |
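# Worked examples: cos(0°)² = 1 leaves the intensity unchanged, while
# cos(60°)² = 0.25 keeps a quarter of it.
assert malus_law(100.0, 0) == 100.0
assert abs(malus_law(100.0, 60) - 25.0) < 1e-9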
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]
def __init__( self : int , a__ : Optional[Any]=80 , a__ : Optional[int]=1_60_00 , a__ : int=1_60 , a__ : Union[str, Any]=30 , a__ : Tuple=4_00 , a__ : List[Any]=0.0 , a__ : Optional[Any]=False , **a__ : List[Any] , ) -> str:
'''simple docstring'''
super().__init__(
feature_size=a__ , sampling_rate=a__ , padding_value=a__ , return_attention_mask=a__ , **a__ , )
_A = n_fft
_A = hop_length
_A = chunk_length
_A = chunk_length * sampling_rate
_A = self.n_samples // hop_length
_A = sampling_rate
_A = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=a__ , min_frequency=0.0 , max_frequency=8_0_0_0.0 , sampling_rate=a__ , norm="slaney" , mel_scale="slaney" , )
def a_ ( self : int , a__ : np.array ) -> np.ndarray:
'''simple docstring'''
_A = spectrogram(
a__ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , )
_A = log_spec[:, :-1]
_A = np.maximum(a__ , log_spec.max() - 8.0 )
_A = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def a_ ( a__ : List[np.ndarray] , a__ : List[np.ndarray] , a__ : float = 0.0 ) -> List[np.ndarray]:
'''simple docstring'''
if attention_mask is not None:
_A = np.array(a__ , np.intaa )
_A = []
for vector, length in zip(a__ , attention_mask.sum(-1 ) ):
_A = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
_A = padding_value
normed_input_values.append(a__ )
else:
_A = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self : Optional[int] , a__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a__ : bool = True , a__ : Optional[int] = None , a__ : Optional[Union[str, TensorType]] = None , a__ : Optional[bool] = None , a__ : Optional[str] = "max_length" , a__ : Optional[int] = None , a__ : Optional[int] = None , a__ : Optional[bool] = None , **a__ : Dict , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_A = isinstance(a__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
_A = is_batched_numpy or (
isinstance(a__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_A = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(a__ , np.ndarray ):
_A = np.asarray(a__ , dtype=np.floataa )
elif isinstance(a__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_A = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_A = [np.asarray([raw_speech] ).T]
_A = BatchFeature({"input_features": raw_speech} )
# convert into correct format for padding
_A = self.pad(
a__ , padding=a__ , max_length=max_length if max_length else self.n_samples , truncation=a__ , pad_to_multiple_of=a__ , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
_A = self.zero_mean_unit_var_norm(
padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
_A = np.stack(padded_inputs["input_features"] , axis=0 )
# make sure list is in array format
_A = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 )
_A = [self._np_extract_fbank_features(a__ ) for waveform in input_features[0]]
if isinstance(input_features[0] , a__ ):
_A = [np.asarray(a__ , dtype=np.floataa ) for feature in input_features]
else:
_A = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
_A = padded_inputs["attention_mask"][:, :: self.hop_length]
if return_tensors is not None:
_A = padded_inputs.convert_to_tensors(a__ )
return padded_inputs
def a_ ( self : Dict ) -> Dict[str, Any]:
'''simple docstring'''
_A = copy.deepcopy(self.__dict__ )
_A = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output | 621 | 0 |
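# Hypothetical usage sketch, assuming the class above is transformers'
# WhisperFeatureExtractor with its default 80 mel bins and 30 s chunks:
#   import numpy as np
#   extractor = WhisperFeatureExtractor()
#   audio = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
#   feats = extractor(audio, sampling_rate=16000, return_tensors="np")
#   feats["input_features"].shape  # -> (1, 80, 3000): 80 mel bins x 30 s of frames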
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
# Load configuration defined in the metadata file
with open(__lowercase ) as metadata_file:
_A = json.load(__lowercase )
_A = LukeConfig(use_entity_aware_attention=__lowercase , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
_A = torch.load(__lowercase , map_location="cpu" )
# Load the entity vocab file
_A = load_entity_vocab(__lowercase )
_A = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
_A = AddedToken("<ent>" , lstrip=__lowercase , rstrip=__lowercase )
_A = AddedToken("<ent2>" , lstrip=__lowercase , rstrip=__lowercase )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(__lowercase )
with open(os.path.join(__lowercase , LukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(__lowercase , __lowercase )
_A = LukeTokenizer.from_pretrained(__lowercase )
# Initialize the embeddings of the special tokens
_A = state_dict["embeddings.word_embeddings.weight"]
_A = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 )
_A = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 )
_A = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_A = f"""encoder.layer.{layer_index}.attention.self."""
_A = state_dict[prefix + matrix_name]
_A = state_dict[prefix + matrix_name]
_A = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_A = state_dict["entity_embeddings.entity_embeddings.weight"]
_A = entity_emb[entity_vocab["[MASK]"]]
_A = LukeModel(config=__lowercase ).eval()
_A , _A = model.load_state_dict(__lowercase , strict=__lowercase )
if not (len(__lowercase ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f"""Missing keys {', '.join(__lowercase )}. Expected only missing embeddings.position_ids""" )
if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )):
raise ValueError(
"Unexpected keys"
f""" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}""" )
# Check outputs
_A = LukeTokenizer.from_pretrained(__lowercase , task="entity_classification" )
_A = (
"Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
" new world number one avoid a humiliating second- round exit at Wimbledon ."
)
_A = (39, 42)
_A = tokenizer(__lowercase , entity_spans=[span] , add_prefix_space=__lowercase , return_tensors="pt" )
_A = model(**__lowercase )
# Verify word hidden states
if model_size == "large":
_A = torch.Size((1, 42, 1024) )
_A = torch.tensor(
[[0.0_133, 0.0_865, 0.0_095], [0.3_093, -0.2_576, -0.7_418], [-0.1_720, -0.2_117, -0.2_869]] )
else: # base
_A = torch.Size((1, 42, 768) )
_A = torch.tensor([[0.0_037, 0.1_368, -0.0_091], [0.1_099, 0.3_329, -0.1_095], [0.0_765, 0.5_335, 0.1_179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __lowercase , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
_A = torch.Size((1, 1, 1024) )
_A = torch.tensor([[0.0_466, -0.0_106, -0.0_179]] )
else: # base
_A = torch.Size((1, 1, 768) )
_A = torch.tensor([[0.1_457, 0.1_044, 0.0_174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
f""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __lowercase , atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(__lowercase ) )
model.save_pretrained(__lowercase )
def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index
    return entity_vocab
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
a_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 708 |
"""simple docstring"""
from __future__ import annotations
def a__ ( __lowercase , __lowercase ) -> float:
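    # Median of the merged input: sort the concatenation, then return the middle
    # element (odd total length) or the mean of the two middle elements (even).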
_A = sorted(numsa + numsa )
_A , _A = divmod(len(__lowercase ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ = [float(x) for x in input("Enter the elements of first array: ").split()]
a_ = [float(x) for x in input("Enter the elements of second array: ").split()]
print(f'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''') | 621 | 0 |
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
) | 709 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
"Salesforce/blip-vqa-capfit-large": (
"https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-base": (
"https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-large": (
"https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
),
"Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
"Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
"Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
"Salesforce/blip-itm-large-flikr": (
"https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
),
}
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'blip_text_model'
def __init__( self : int , a__ : List[str]=3_05_24 , a__ : List[str]=7_68 , a__ : List[Any]=7_68 , a__ : int=30_72 , a__ : List[str]=7_68 , a__ : Dict=12 , a__ : Optional[int]=8 , a__ : Optional[Any]=5_12 , a__ : List[Any]="gelu" , a__ : Optional[Any]=1E-1_2 , a__ : Any=0.0 , a__ : int=0.0 , a__ : Dict=0.0_2 , a__ : Optional[Any]=3_05_22 , a__ : Any=2 , a__ : int=0 , a__ : Union[str, Any]=1_02 , a__ : Tuple=True , a__ : Optional[int]=True , **a__ : Any , ) -> List[Any]:
'''simple docstring'''
super().__init__(
pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , sep_token_id=a__ , **a__ , )
_A = vocab_size
_A = hidden_size
_A = encoder_hidden_size
_A = intermediate_size
_A = projection_dim
_A = hidden_dropout_prob
_A = num_hidden_layers
_A = num_attention_heads
_A = max_position_embeddings
_A = layer_norm_eps
_A = hidden_act
_A = initializer_range
_A = attention_probs_dropout_prob
_A = is_decoder
_A = use_cache
@classmethod
def a_ ( cls : Optional[Any] , a__ : Union[str, os.PathLike] , **a__ : Optional[Any] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(a__ )
_A , _A = cls.get_config_dict(a__ , **a__ )
# get the text config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
_A = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(a__ , **a__ )
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'blip_vision_model'
def __init__( self : Optional[Any] , a__ : Any=7_68 , a__ : List[str]=30_72 , a__ : str=5_12 , a__ : Any=12 , a__ : int=12 , a__ : int=3_84 , a__ : Tuple=16 , a__ : str="gelu" , a__ : Tuple=1E-5 , a__ : List[str]=0.0 , a__ : List[Any]=1E-1_0 , **a__ : int , ) -> List[str]:
'''simple docstring'''
super().__init__(**a__ )
_A = hidden_size
_A = intermediate_size
_A = projection_dim
_A = num_hidden_layers
_A = num_attention_heads
_A = patch_size
_A = image_size
_A = initializer_range
_A = attention_dropout
_A = layer_norm_eps
_A = hidden_act
@classmethod
def a_ ( cls : Any , a__ : Union[str, os.PathLike] , **a__ : int ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(a__ )
_A , _A = cls.get_config_dict(a__ , **a__ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
_A = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(a__ , **a__ )
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'blip'
__UpperCamelCase = True
def __init__( self : List[Any] , a__ : Optional[int]=None , a__ : str=None , a__ : List[str]=5_12 , a__ : Any=2.6_5_9_2 , a__ : str=2_56 , **a__ : Optional[int] , ) -> Dict:
'''simple docstring'''
super().__init__(**a__ )
if text_config is None:
_A = {}
logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values." )
if vision_config is None:
_A = {}
logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values." )
_A = BlipTextConfig(**a__ )
_A = BlipVisionConfig(**a__ )
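        # the text encoder cross-attends to vision features, so its encoder width
        # must match the vision tower's hidden size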
_A = self.vision_config.hidden_size
_A = projection_dim
_A = logit_scale_init_value
_A = 1.0
_A = 0.0_2
_A = image_text_hidden_size
@classmethod
def a_ ( cls : Tuple , a__ : BlipTextConfig , a__ : BlipVisionConfig , **a__ : Optional[int] ) -> str:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **a__ )
def a_ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
_A = copy.deepcopy(self.__dict__ )
_A = self.text_config.to_dict()
_A = self.vision_config.to_dict()
_A = self.__class__.model_type
return output | 621 | 0 |
"""simple docstring"""
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def a__ ( __lowercase ) -> Union[str, Any]:
return EnvironmentCommand()
class snake_case ( _UpperCamelCase):
@staticmethod
def a_ ( a__ : ArgumentParser ) -> Union[str, Any]:
'''simple docstring'''
_A = parser.add_parser("env" )
download_parser.set_defaults(func=a__ )
def a_ ( self : List[Any] ) -> Any:
'''simple docstring'''
_A = huggingface_hub.__version__
_A = "not installed"
_A = "NA"
if is_torch_available():
import torch
_A = torch.__version__
_A = torch.cuda.is_available()
_A = "not installed"
if is_transformers_available():
import transformers
_A = transformers.__version__
_A = "not installed"
if is_accelerate_available():
import accelerate
_A = accelerate.__version__
_A = "not installed"
if is_xformers_available():
import xformers
_A = xformers.__version__
_A = {
"`diffusers` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"PyTorch version (GPU?)": F"""{pt_version} ({pt_cuda_available})""",
"Huggingface_hub version": hub_version,
"Transformers version": transformers_version,
"Accelerate version": accelerate_version,
"xFormers version": xformers_version,
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
print(self.format_dict(a__ ) )
return info
@staticmethod
def a_ ( a__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 710 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class snake_case ( unittest.TestCase , _UpperCamelCase):
def a_ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
_A = load_tool("text-classification" )
self.tool.setup()
_A = load_tool("text-classification" , remote=a__ )
def a_ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
_A = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def a_ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
_A = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def a_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
_A = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def a_ ( self : Dict ) -> Any:
'''simple docstring'''
_A = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(a__ , "positive" ) | 621 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case ( _UpperCamelCase , unittest.TestCase):
__UpperCamelCase = KandinskyVaaPriorPipeline
__UpperCamelCase = ['prompt']
__UpperCamelCase = ['prompt', 'negative_prompt']
__UpperCamelCase = [
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
__UpperCamelCase = False
@property
def a_ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
return 32
@property
def a_ ( self : List[str] ) -> Dict:
'''simple docstring'''
return 32
@property
def a_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
return self.time_input_dim
@property
def a_ ( self : Dict ) -> int:
'''simple docstring'''
return self.time_input_dim * 4
@property
def a_ ( self : str ) -> List[str]:
'''simple docstring'''
return 1_00
@property
def a_ ( self : List[str] ) -> Any:
'''simple docstring'''
_A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def a_ ( self : Dict ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
_A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModelWithProjection(a__ )
@property
def a_ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
_A = {
"num_attention_heads": 2,
"attention_head_dim": 12,
"embedding_dim": self.text_embedder_hidden_size,
"num_layers": 1,
}
_A = PriorTransformer(**a__ )
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it won't
_A = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def a_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
_A = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_24 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
_A = CLIPVisionModelWithProjection(a__ )
return model
@property
def a_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
_A = CLIPImageProcessor(
crop_size=2_24 , do_center_crop=a__ , do_normalize=a__ , do_resize=a__ , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=2_24 , )
return image_processor
def a_ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
_A = self.dummy_prior
_A = self.dummy_image_encoder
_A = self.dummy_text_encoder
_A = self.dummy_tokenizer
_A = self.dummy_image_processor
_A = UnCLIPScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=10_00 , clip_sample=a__ , clip_sample_range=10.0 , )
_A = {
"prior": prior,
"image_encoder": image_encoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
"image_processor": image_processor,
}
return components
def a_ ( self : Union[str, Any] , a__ : Optional[Any] , a__ : List[str]=0 ) -> Optional[int]:
'''simple docstring'''
if str(a__ ).startswith("mps" ):
_A = torch.manual_seed(a__ )
else:
_A = torch.Generator(device=a__ ).manual_seed(a__ )
_A = {
"prompt": "horse",
"generator": generator,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def a_ ( self : List[Any] ) -> str:
'''simple docstring'''
_A = "cpu"
_A = self.get_dummy_components()
_A = self.pipeline_class(**a__ )
_A = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_A = pipe(**self.get_dummy_inputs(a__ ) )
_A = output.image_embeds
_A = pipe(
**self.get_dummy_inputs(a__ ) , return_dict=a__ , )[0]
_A = image[0, -10:]
_A = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
_A = np.array(
[-0.0_5_3_2, 1.7_1_2_0, 0.3_6_5_6, -1.0_8_5_2, -0.8_9_4_6, -1.1_7_5_6, 0.4_3_4_8, 0.2_4_8_2, 0.5_1_4_6, -0.1_1_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def a_ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
_A = torch_device == "cpu"
_A = True
_A = False
self._test_inference_batch_single_identical(
test_max_difference=a__ , relax_max_difference=a__ , test_mean_pixel_difference=a__ , )
@skip_mps
def a_ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
_A = torch_device == "cpu"
_A = False
self._test_attention_slicing_forward_pass(
test_max_difference=a__ , test_mean_pixel_difference=a__ , ) | 711 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase):
__UpperCamelCase = StableDiffusionInpaintPipeline
__UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
__UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__UpperCamelCase = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__UpperCamelCase = frozenset([])
def a_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
_A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a__ , )
_A = PNDMScheduler(skip_prk_steps=a__ )
torch.manual_seed(0 )
_A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
_A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , )
_A = CLIPTextModel(a__ )
_A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_A = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def a_ ( self : Optional[Any] , a__ : List[str] , a__ : Tuple=0 ) -> int:
'''simple docstring'''
_A = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
_A = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A = Image.fromarray(np.uinta(a__ ) ).convert("RGB" ).resize((64, 64) )
_A = Image.fromarray(np.uinta(image + 4 ) ).convert("RGB" ).resize((64, 64) )
if str(a__ ).startswith("mps" ):
_A = torch.manual_seed(a__ )
else:
_A = torch.Generator(device=a__ ).manual_seed(a__ )
_A = {
"prompt": "A painting of a squirrel eating a burger",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def a_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_A = "cpu" # ensure determinism for the device-dependent torch.Generator
_A = self.get_dummy_components()
_A = StableDiffusionInpaintPipeline(**a__ )
_A = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_A = self.get_dummy_inputs(a__ )
_A = sd_pipe(**a__ ).images
_A = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A = np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def a_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase):
def a_ ( self : List[Any] ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
_A = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench.npy" )
_A = "stabilityai/stable-diffusion-2-inpainting"
_A = StableDiffusionInpaintPipeline.from_pretrained(a__ , safety_checker=a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
_A = "Face of a yellow cat, high resolution, sitting on a park bench"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type="np" , )
_A = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def a_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
_A = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench_fp16.npy" )
_A = "stabilityai/stable-diffusion-2-inpainting"
_A = StableDiffusionInpaintPipeline.from_pretrained(
a__ , torch_dtype=torch.floataa , safety_checker=a__ , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
_A = "Face of a yellow cat, high resolution, sitting on a park bench"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type="np" , )
_A = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def a_ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
_A = "stabilityai/stable-diffusion-2-inpainting"
_A = PNDMScheduler.from_pretrained(a__ , subfolder="scheduler" )
_A = StableDiffusionInpaintPipeline.from_pretrained(
a__ , safety_checker=a__ , scheduler=a__ , torch_dtype=torch.floataa , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_A = "Face of a yellow cat, high resolution, sitting on a park bench"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , num_inference_steps=2 , output_type="np" , )
_A = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.6_5 * 10**9 | 621 | 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP | 712 |
"""simple docstring"""
def a__ ( __lowercase , __lowercase ) -> int:
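    # Euclidean algorithm: repeatedly replace (a, b) with (b % a, a) until a == 0.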
while a != 0:
_A , _A = b % a, a
return b
def a__ ( __lowercase , __lowercase ) -> int:
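    # Modular inverse via the extended Euclidean algorithm; it exists only when
    # a and m are coprime.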
if gcd(__lowercase , __lowercase ) != 1:
_A = f"""mod inverse of {a!r} and {m!r} does not exist"""
raise ValueError(__lowercase )
_A , _A , _A = 1, 0, a
_A , _A , _A = 0, 1, m
while va != 0:
_A = ua // va
_A , _A , _A , _A , _A , _A = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m | 621 | 0 |
"""simple docstring"""
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class snake_case ( _UpperCamelCase):
def __init__( self : List[Any] , a__ : Any ) -> Any:
'''simple docstring'''
_A = data
def __iter__( self : List[str] ) -> str:
'''simple docstring'''
for element in self.data:
yield element
def a__ ( __lowercase=True ) -> Tuple:
_A = Accelerator(even_batches=__lowercase )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase = False ) -> Union[str, Any]:
if iterable:
_A = DummyIterableDataset(torch.as_tensor(range(__lowercase ) ) )
else:
_A = TensorDataset(torch.as_tensor(range(__lowercase ) ) )
_A = DataLoader(__lowercase , batch_size=__lowercase )
_A = accelerator.prepare(__lowercase )
return dl
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Dict:
_A = create_dataloader(accelerator=__lowercase , dataset_size=__lowercase , batch_size=__lowercase )
_A = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def a__ ( ) -> List[str]:
_A = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
__lowercase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
__lowercase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def a__ ( ) -> List[Any]:
_A = create_accelerator(even_batches=__lowercase )
verify_dataloader_batch_sizes(
__lowercase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
__lowercase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def a__ ( ) -> int:
_A = create_accelerator(even_batches=__lowercase )
_A = torch.nn.Linear(1 , 1 )
_A = accelerator.prepare(__lowercase )
_A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 )
_A = []
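    # join_uneven_inputs lets processes that run out of batches idle gracefully,
    # so the DDP backward pass does not hang when even_batches is disabled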
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(__lowercase ):
_A = ddp_model(batch[0].float() )
_A = output.sum()
loss.backward()
batch_idxs.append(__lowercase )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def a__ ( __lowercase ) -> List[str]:
with warnings.catch_warnings(record=__lowercase ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , __lowercase )
assert "only supported for multi-GPU" in str(w[-1].message )
def a__ ( ) -> Tuple:
_A = True
_A = False
_A = create_accelerator(even_batches=__lowercase )
_A = torch.nn.Linear(1 , 1 )
_A = accelerator.prepare(__lowercase )
_A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 )
_A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ):
_A = train_dl.batch_sampler.even_batches
_A = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def a__ ( ) -> int:
_A = True
_A = False
_A = create_accelerator(even_batches=__lowercase )
_A = torch.nn.Linear(1 , 1 )
_A = accelerator.prepare(__lowercase )
create_dataloader(__lowercase , dataset_size=3 , batch_size=1 , iterable=__lowercase )
_A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings("ignore" )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ):
_A = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def a__ ( ) -> Optional[Any]:
_A = create_accelerator()
_A = torch.nn.Linear(1 , 1 )
_A = accelerator.prepare(__lowercase )
create_dataloader(__lowercase , dataset_size=3 , batch_size=1 , iterable=__lowercase )
with warnings.catch_warnings(record=__lowercase ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ):
pass
assert issubclass(w[-1].category , __lowercase )
assert "only supported for map-style datasets" in str(w[-1].message )
def a__ ( ) -> Optional[Any]:
_A = create_accelerator()
accelerator.print("Test that even_batches variable ensures uniform batches across processes" )
test_default_ensures_even_batch_sizes()
accelerator.print("Run tests with even_batches disabled" )
test_can_disable_even_batches()
accelerator.print("Test joining uneven inputs" )
test_can_join_uneven_inputs()
accelerator.print("Test overriding even_batches when joining uneven inputs" )
test_join_can_override_even_batches()
accelerator.print("Test overriding even_batches for mixed dataloader types" )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print("Test join with non DDP distributed raises warning" )
_A = accelerator.state.distributed_type
_A = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(__lowercase )
_A = original_state
if __name__ == "__main__":
main() | 713 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class snake_case ( _UpperCamelCase):
def __init__( self : List[Any] , a__ : Any ) -> Any:
'''simple docstring'''
_A = data
def __iter__( self : List[str] ) -> str:
'''simple docstring'''
for element in self.data:
yield element
def a__ ( __lowercase=True ) -> Tuple:
_A = Accelerator(even_batches=__lowercase )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase = False ) -> Union[str, Any]:
if iterable:
_A = DummyIterableDataset(torch.as_tensor(range(__lowercase ) ) )
else:
_A = TensorDataset(torch.as_tensor(range(__lowercase ) ) )
_A = DataLoader(__lowercase , batch_size=__lowercase )
_A = accelerator.prepare(__lowercase )
return dl
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Dict:
_A = create_dataloader(accelerator=__lowercase , dataset_size=__lowercase , batch_size=__lowercase )
_A = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def a__ ( ) -> List[str]:
_A = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
__lowercase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
__lowercase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def a__ ( ) -> List[Any]:
_A = create_accelerator(even_batches=__lowercase )
verify_dataloader_batch_sizes(
__lowercase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
__lowercase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def a__ ( ) -> int:
_A = create_accelerator(even_batches=__lowercase )
_A = torch.nn.Linear(1 , 1 )
_A = accelerator.prepare(__lowercase )
_A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 )
_A = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(__lowercase ):
_A = ddp_model(batch[0].float() )
_A = output.sum()
loss.backward()
batch_idxs.append(__lowercase )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def a__ ( __lowercase ) -> List[str]:
with warnings.catch_warnings(record=__lowercase ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , __lowercase )
assert "only supported for multi-GPU" in str(w[-1].message )
def a__ ( ) -> Tuple:
_A = True
_A = False
_A = create_accelerator(even_batches=__lowercase )
_A = torch.nn.Linear(1 , 1 )
_A = accelerator.prepare(__lowercase )
_A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 )
_A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ):
_A = train_dl.batch_sampler.even_batches
_A = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def a__ ( ) -> int:
_A = True
_A = False
_A = create_accelerator(even_batches=__lowercase )
_A = torch.nn.Linear(1 , 1 )
_A = accelerator.prepare(__lowercase )
create_dataloader(__lowercase , dataset_size=3 , batch_size=1 , iterable=__lowercase )
_A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings("ignore" )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ):
_A = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def a__ ( ) -> Optional[Any]:
_A = create_accelerator()
_A = torch.nn.Linear(1 , 1 )
_A = accelerator.prepare(__lowercase )
create_dataloader(__lowercase , dataset_size=3 , batch_size=1 , iterable=__lowercase )
with warnings.catch_warnings(record=__lowercase ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ):
pass
assert issubclass(w[-1].category , __lowercase )
assert "only supported for map-style datasets" in str(w[-1].message )
def a__ ( ) -> Optional[Any]:
_A = create_accelerator()
accelerator.print("Test that even_batches variable ensures uniform batches across processes" )
test_default_ensures_even_batch_sizes()
accelerator.print("Run tests with even_batches disabled" )
test_can_disable_even_batches()
accelerator.print("Test joining uneven inputs" )
test_can_join_uneven_inputs()
accelerator.print("Test overriding even_batches when joining uneven inputs" )
test_join_can_override_even_batches()
accelerator.print("Test overriding even_batches for mixed dataloader types" )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print("Test join with non DDP distributed raises warning" )
_A = accelerator.state.distributed_type
_A = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(__lowercase )
_A = original_state
if __name__ == "__main__":
main() | 621 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class snake_case :
def __init__( self : Dict , a__ : Any , a__ : Union[str, Any]=13 , a__ : str=7 , a__ : str=6 , a__ : int=17 , a__ : Union[str, Any]=23 , a__ : Union[str, Any]=11 , a__ : Union[str, Any]=True , ) -> List[Any]:
'''simple docstring'''
_A = parent
_A = batch_size
_A = seq_length
_A = act_dim
_A = state_dim
_A = hidden_size
_A = max_length
_A = is_training
def a_ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
_A = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
_A = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
_A = floats_tensor((self.batch_size, self.seq_length, 1) )
_A = floats_tensor((self.batch_size, self.seq_length, 1) )
_A = ids_tensor((self.batch_size, self.seq_length) , vocab_size=10_00 )
_A = random_attention_mask((self.batch_size, self.seq_length) )
_A = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def a_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def a_ ( self : List[Any] , a__ : Union[str, Any] , a__ : str , a__ : Tuple , a__ : str , a__ : Dict , a__ : str , a__ : Optional[int] , ) -> Any:
'''simple docstring'''
_A = DecisionTransformerModel(config=a__ )
model.to(a__ )
model.eval()
_A = model(a__ , a__ , a__ , a__ , a__ , a__ )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length * 3 as there are 3 modalities: states, returns and actions
def a_ ( self : Tuple ) -> int:
'''simple docstring'''
_A = self.prepare_config_and_inputs()
(
(
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) ,
) = config_and_inputs
_A = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class snake_case ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase):
__UpperCamelCase = (DecisionTransformerModel,) if is_torch_available() else ()
__UpperCamelCase = ()
__UpperCamelCase = {'feature-extraction': DecisionTransformerModel} if is_torch_available() else {}
    # Ignore a failing test from GenerationTesterMixin, as the model does not use input_ids
__UpperCamelCase = False
    # Ignore failing tests from ModelTesterMixin, as the model does not implement these features
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def a_ ( self : str ) -> Tuple:
'''simple docstring'''
_A = DecisionTransformerModelTester(self )
_A = ConfigTester(self , config_class=a__ , hidden_size=37 )
def a_ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def a_ ( self : str ) -> Optional[int]:
'''simple docstring'''
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
@slow
def a_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = DecisionTransformerModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def a_ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(a__ )
_A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = [
"states",
"actions",
"rewards",
"returns_to_go",
"timesteps",
"attention_mask",
]
self.assertListEqual(arg_names[: len(a__ )] , a__ )
@require_torch
class snake_case ( unittest.TestCase):
@slow
def a_ ( self : str ) -> str:
'''simple docstring'''
_A = 2 # number of steps of autoregressive prediction we will perform
_A = 10 # defined by the RL environment, may be normalized
_A = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
_A = model.to(a__ )
_A = model.config
torch.manual_seed(0 )
_A = torch.randn(1 , 1 , config.state_dim ).to(device=a__ , dtype=torch.floataa ) # env.reset()
_A = torch.tensor(
[[0.2_4_2_7_9_3, -0.2_8_6_9_3_0_7_4, 0.8_7_4_2_6_1_3], [0.6_7_8_1_5_2_7_4, -0.0_8_1_0_1_0_8_5, -0.1_2_9_5_2_1_4_7]] , device=a__ )
_A = torch.tensor(a__ , device=a__ , dtype=torch.floataa ).reshape(1 , 1 , 1 )
_A = state
_A = torch.zeros(1 , 0 , config.act_dim , device=a__ , dtype=torch.floataa )
_A = torch.zeros(1 , 0 , device=a__ , dtype=torch.floataa )
_A = torch.tensor(0 , device=a__ , dtype=torch.long ).reshape(1 , 1 )
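        # autoregressive rollout: at each step, append zero placeholders for the
        # next action and reward, predict the action, then extend the state,
        # return-to-go and timestep buffers with the (mocked) environment step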
for step in range(a__ ):
_A = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=a__ )] , dim=1 )
_A = torch.cat([rewards, torch.zeros(1 , 1 , device=a__ )] , dim=1 )
_A = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
_A , _A , _A = model(
states=a__ , actions=a__ , rewards=a__ , returns_to_go=a__ , timesteps=a__ , attention_mask=a__ , return_dict=a__ , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4 ) )
_A , _A , _A , _A = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=a__ , dtype=torch.floataa ),
1.0,
False,
{},
)
_A = action_pred[0, -1]
_A = torch.cat([states, state] , dim=1 )
_A = returns_to_go[0, -1] - reward
_A = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
_A = torch.cat(
[timesteps, torch.ones((1, 1) , device=a__ , dtype=torch.long ) * (step + 1)] , dim=1 ) | 714 |
"""simple docstring"""
class snake_case :
def __init__( self : Optional[int] , a__ : List[Any] , a__ : List[str] , a__ : Tuple ) -> Optional[Any]:
'''simple docstring'''
_A = None
_A = None
_A = graph
self._normalize_graph(a__ , a__ )
_A = len(a__ )
_A = None
def a_ ( self : str , a__ : List[str] , a__ : List[Any] ) -> Dict:
'''simple docstring'''
        if isinstance(sources , int ):
_A = [sources]
        if isinstance(sinks , int ):
_A = [sinks]
if len(a__ ) == 0 or len(a__ ) == 0:
return
_A = sources[0]
_A = sinks[0]
        # make a fake vertex if there is more
        # than one source or sink
if len(a__ ) > 1 or len(a__ ) > 1:
_A = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
_A = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_A = max_input_flow
_A = 0
_A = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_A = max_input_flow
_A = size - 1
def a_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
if self.maximum_flow_algorithm is None:
raise Exception("You need to set maximum flow algorithm before." )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def a_ ( self : List[Any] , a__ : Optional[Any] ) -> str:
'''simple docstring'''
_A = algorithm(self )
class snake_case :
def __init__( self : List[str] , a__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
_A = flow_network
_A = flow_network.verticesCount
_A = flow_network.sourceIndex
_A = flow_network.sinkIndex
        # this is just a reference, so you shouldn't change
        # it in your algorithms; use a deep copy before doing that
_A = flow_network.graph
_A = False
def a_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
if not self.executed:
self._algorithm()
_A = True
def a_ ( self : Any ) -> int:
'''simple docstring'''
pass
class snake_case ( _UpperCamelCase):
def __init__( self : Optional[Any] , a__ : Dict ) -> List[str]:
'''simple docstring'''
super().__init__(a__ )
# use this to save your result
_A = -1
def a_ ( self : Any ) -> List[str]:
'''simple docstring'''
if not self.executed:
raise Exception("You should execute algorithm before using its result!" )
return self.maximum_flow
class snake_case ( _UpperCamelCase):
def __init__( self : Union[str, Any] , a__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
super().__init__(a__ )
_A = [[0] * self.verticies_count for i in range(self.verticies_count )]
_A = [0] * self.verticies_count
_A = [0] * self.verticies_count
def a_ ( self : Any ) -> Dict:
'''simple docstring'''
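        # push-relabel with the relabel-to-front rule: saturate every edge out of
        # the source, then repeatedly push excess flow downhill or relabel
        # vertices until no internal vertex holds excess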
_A = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_A = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
_A = 0
while i < len(a__ ):
_A = vertices_list[i]
_A = self.heights[vertex_index]
self.process_vertex(a__ )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(a__ ) )
_A = 0
else:
i += 1
_A = sum(self.preflow[self.source_index] )
def a_ ( self : Dict , a__ : Any ) -> Optional[int]:
'''simple docstring'''
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
                # if it's a neighbour and the current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(a__ , a__ )
self.relabel(a__ )
def a_ ( self : str , a__ : Optional[int] , a__ : List[Any] ) -> Optional[int]:
'''simple docstring'''
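        # push min(excess, residual capacity) units of flow along the edge
        # from_index -> to_index and update the excess on both endpoints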
_A = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def a_ ( self : Any , a__ : Dict ) -> Any:
'''simple docstring'''
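        # raise the vertex height to one more than the lowest neighbour still
        # reachable through an edge with residual capacity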
_A = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_A = self.heights[to_index]
if min_height is not None:
_A = min_height + 1
if __name__ == "__main__":
a_ = [0]
a_ = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
a_ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
a_ = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
a_ = flow_network.find_maximum_flow()
print(f'''maximum flow is {maximum_flow}''') | 621 | 0 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def a__ ( __lowercase ) -> int:
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items() )
def a__ ( __lowercase , __lowercase ) -> int:
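    # rename the original FLAVA checkpoint keys to the Hugging Face layout; the
    # codebook weights are appended unchanged at the end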
_A = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
_A = key.replace("heads.cmd.mim_head.cls.predictions" , "mmm_image_head" )
_A = key.replace("heads.cmd.mlm_head.cls.predictions" , "mmm_text_head" )
_A = key.replace("heads.cmd.itm_head.cls" , "itm_head" )
_A = key.replace("heads.cmd.itm_head.pooler" , "itm_head.pooler" )
_A = key.replace("heads.cmd.clip_head.logit_scale" , "flava.logit_scale" )
_A = key.replace("heads.fairseq_mlm.cls.predictions" , "mlm_head" )
_A = key.replace("heads.imagenet.mim_head.cls.predictions" , "mim_head" )
_A = key.replace("mm_text_projection" , "flava.text_to_mm_projection" )
_A = key.replace("mm_image_projection" , "flava.image_to_mm_projection" )
_A = key.replace("image_encoder.module" , "flava.image_model" )
_A = key.replace("text_encoder.module" , "flava.text_model" )
_A = key.replace("mm_encoder.module.encoder.cls_token" , "flava.multimodal_model.cls_token" )
_A = key.replace("mm_encoder.module" , "flava.multimodal_model" )
_A = key.replace("text_projection" , "flava.text_projection" )
_A = key.replace("image_projection" , "flava.image_projection" )
_A = value.float()
for key, value in codebook_state_dict.items():
_A = value
return upgrade
@torch.no_grad()
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase=None ) -> Any:
if config_path is not None:
_A = FlavaConfig.from_pretrained(__lowercase )
else:
_A = FlavaConfig()
_A = FlavaForPreTraining(__lowercase ).eval()
_A = convert_dalle_checkpoint(__lowercase , __lowercase , save_checkpoint=__lowercase )
if os.path.exists(__lowercase ):
_A = torch.load(__lowercase , map_location="cpu" )
else:
_A = torch.hub.load_state_dict_from_url(__lowercase , map_location="cpu" )
_A = upgrade_state_dict(__lowercase , __lowercase )
hf_model.load_state_dict(__lowercase )
_A = hf_model.state_dict()
_A = count_parameters(__lowercase )
_A = count_parameters(__lowercase ) + count_parameters(__lowercase )
assert torch.allclose(__lowercase , __lowercase , atol=1E-3 )
hf_model.save_pretrained(__lowercase )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
a_ = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path) | 715 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 621 | 0 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class snake_case ( unittest.TestCase):
@require_torch
def a_ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
_A = pipeline(
task="zero-shot-audio-classification" , model="hf-internal-testing/tiny-clap-htsat-unfused" )
_A = load_dataset("ashraq/esc50" )
_A = dataset["train"]["audio"][-1]["array"]
_A = audio_classifier(a__ , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a__ ) , [{"score": 0.5_0_1, "label": "Sound of a dog"}, {"score": 0.4_9_9, "label": "Sound of vaccum cleaner"}] , )
@unittest.skip("No models are available in TF" )
def a_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
pass
@slow
@require_torch
def a_ ( self : str ) -> Tuple:
'''simple docstring'''
_A = pipeline(
task="zero-shot-audio-classification" , model="laion/clap-htsat-unfused" , )
        # This is an audio clip of a dog
_A = load_dataset("ashraq/esc50" )
_A = dataset["train"]["audio"][-1]["array"]
_A = audio_classifier(a__ , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a__ ) , [
{"score": 0.9_9_9, "label": "Sound of a dog"},
{"score": 0.0_0_1, "label": "Sound of vaccum cleaner"},
] , )
_A = audio_classifier([audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a__ ) , [
[
{"score": 0.9_9_9, "label": "Sound of a dog"},
{"score": 0.0_0_1, "label": "Sound of vaccum cleaner"},
],
]
* 5 , )
_A = audio_classifier(
[audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] , batch_size=5 )
self.assertEqual(
nested_simplify(a__ ) , [
[
{"score": 0.9_9_9, "label": "Sound of a dog"},
{"score": 0.0_0_1, "label": "Sound of vaccum cleaner"},
],
]
* 5 , )
@unittest.skip("No models are available in TF" )
def a_ ( self : Optional[Any] ) -> str:
'''simple docstring'''
pass | 716 |
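# [Editor's sketch] Minimal usage of the zero-shot audio classification
# pipeline exercised in the tests above. The checkpoint, dataset, and label
# strings are taken from the test itself (the "vaccum" spelling is kept as in
# the test); the exact scores are illustrative, not guaranteed.
from datasets import load_dataset
from transformers import pipeline

classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
audio_array = load_dataset("ashraq/esc50")["train"]["audio"][-1]["array"]
print(classifier(audio_array, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"]))
# e.g. [{"score": 0.999, "label": "Sound of a dog"}, {"score": 0.001, "label": "Sound of vaccum cleaner"}]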
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
a_ = logging.get_logger(__name__)
class snake_case ( _UpperCamelCase):
def __init__( self : str , *a__ : Dict , **a__ : Optional[int] ) -> None:
'''simple docstring'''
warnings.warn(
"The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use GLPNImageProcessor instead." , a__ , )
super().__init__(*a__ , **a__ ) | 621 | 0 |
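# [Editor's sketch] The shim above is the usual deprecation-by-subclass
# pattern: keep the old name importable, warn on construction, and defer
# everything to the replacement class. A self-contained illustration with
# made-up names:
import warnings

class NewImageProcessor:
    def __init__(self, *args, **kwargs):
        self.init_args = (args, kwargs)

class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)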
"""simple docstring"""
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class snake_case ( tf.keras.layers.Layer):
def __init__( self : str , a__ : Dict[str, int] , a__ : List[str] , a__ : int = None , a__ : int = None ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
_A = pad_token_id
_A = max_length
_A = vocab
_A = merges
_A = BytePairTokenizer(a__ , a__ , sequence_length=a__ )
@classmethod
def a_ ( cls : Optional[Any] , a__ : GPTaTokenizer , *a__ : Dict , **a__ : Tuple ) -> Union[str, Any]:
'''simple docstring'''
_A = [" ".join(a__ ) for m in tokenizer.bpe_ranks.keys()]
_A = tokenizer.get_vocab()
return cls(a__ , a__ , *a__ , **a__ )
@classmethod
def a_ ( cls : List[str] , a__ : Union[str, os.PathLike] , *a__ : Tuple , **a__ : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_A = GPTaTokenizer.from_pretrained(a__ , *a__ , **a__ )
return cls.from_tokenizer(a__ , *a__ , **a__ )
@classmethod
def a_ ( cls : List[Any] , a__ : int ) -> Dict:
'''simple docstring'''
return cls(**a__ )
def a_ ( self : int ) -> int:
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def a_ ( self : Optional[Any] , a__ : int , a__ : int = None ) -> Optional[Any]:
'''simple docstring'''
_A = self.tf_tokenizer(a__ )
_A = tf.ones_like(a__ )
if self.pad_token_id is not None:
# pad the tokens up to max length
_A = max_length if max_length is not None else self.max_length
if max_length is not None:
_A , _A = pad_model_inputs(
a__ , max_seq_length=a__ , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids} | 717 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config",
default="facebook/mbart-large-cc25",
type=str,
help="Which huggingface architecture to use: mbart-large",
)
parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
a_ = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path) | 621 | 0 |
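# [Editor's note] Invocation sketch for the converter above (the script
# filename here is a placeholder; the flags are the ones defined by argparse):
#   python convert_mbart_checkpoint.py /path/to/model.pt ./mbart-converted \
#       --hf_config facebook/mbart-large-cc25 --mbart_50 --finetuned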
"""simple docstring"""
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def a__ ( ) -> tuple[list[int], int]:
_A = [randint(-1000 , 1000 ) for i in range(10 )]
_A = randint(-5000 , 5000 )
return (arr, r)
a_ = make_dataset()
def a__ ( __lowercase , __lowercase ) -> tuple[int, ...]:
for triplet in permutations(__lowercase , 3 ):
if sum(__lowercase ) == target:
return tuple(sorted(__lowercase ) )
return (0, 0, 0)
def a__ ( __lowercase , __lowercase ) -> tuple[int, int, int]:
arr.sort()
_A = len(__lowercase )
for i in range(n - 1 ):
_A , _A = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def a__ ( ) -> tuple[float, float]:
_A = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
_A = "\ntriplet_sum1(*dataset)\n"
_A = "\ntriplet_sum2(*dataset)\n"
_A = repeat(setup=__lowercase , stmt=__lowercase , repeat=5 , number=1_0000 )
_A = repeat(setup=__lowercase , stmt=__lowercase , repeat=5 , number=1_0000 )
return (min(__lowercase ), min(__lowercase ))
if __name__ == "__main__":
from doctest import testmod
testmod()
a_ = solution_times()
print(f'''The time for naive implementation is {times[0]}.''')
print(f'''The time for optimized implementation is {times[1]}.''') | 718 |
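# [Editor's sketch] A small hand-checkable case showing the O(n^3) permutation
# search and the sorted two-pointer scan above agree (values chosen so exactly
# one triplet sums to the target):
assert triplet_sum1([1, 5, 8, 2, 7], 10) == (1, 2, 7)
assert triplet_sum2([1, 5, 8, 2, 7], 10) == (1, 2, 7)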
"""simple docstring"""
import numpy as np
def a__ ( __lowercase , __lowercase ) -> np.ndarray:
return np.where(vector > 0 , __lowercase , (alpha * (np.exp(__lowercase ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 621 | 0 |
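# [Editor's sketch] Quick check of the ELU above: positive inputs pass through
# unchanged, negative inputs saturate toward -alpha.
import numpy as np
print(exponential_linear_unit(np.array([2.0, -1.0]), alpha=1.0))
# -> [ 2.         -0.63212056]   since exp(-1) - 1 ≈ -0.6321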
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a_ = 16
a_ = 32
def a__ ( __lowercase , __lowercase = 16 ) -> Dict:
_A = AutoTokenizer.from_pretrained("bert-base-cased" )
_A = load_dataset("glue" , "mrpc" )
def tokenize_function(__lowercase ):
# max_length=None => use the model max length (it's actually the default)
_A = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__lowercase , max_length=__lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_A = datasets.map(
__lowercase , batched=__lowercase , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_A = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(__lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_A = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_A = 16
elif accelerator.mixed_precision != "no":
_A = 8
else:
_A = None
return tokenizer.pad(
__lowercase , padding="longest" , max_length=__lowercase , pad_to_multiple_of=__lowercase , return_tensors="pt" , )
# Instantiate dataloaders.
_A = DataLoader(
tokenized_datasets["train"] , shuffle=__lowercase , collate_fn=__lowercase , batch_size=__lowercase )
_A = DataLoader(
tokenized_datasets["validation"] , shuffle=__lowercase , collate_fn=__lowercase , batch_size=__lowercase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a_ = mocked_dataloaders # noqa: F811
def a__ ( __lowercase , __lowercase ) -> Optional[int]:
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS" , __lowercase ) == "1":
_A = 2
# New Code #
_A = int(args.gradient_accumulation_steps )
_A = int(args.local_sgd_steps )
# Initialize accelerator
_A = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__lowercase )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_A = config["lr"]
_A = int(config["num_epochs"] )
_A = int(config["seed"] )
_A = int(config["batch_size"] )
_A = evaluate.load("glue" , "mrpc" )
set_seed(__lowercase )
_A , _A = get_dataloaders(__lowercase , __lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_A = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=__lowercase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_A = model.to(accelerator.device )
# Instantiate optimizer
_A = AdamW(params=model.parameters() , lr=__lowercase )
# Instantiate scheduler
_A = get_linear_schedule_with_warmup(
optimizer=__lowercase , num_warmup_steps=100 , num_training_steps=(len(__lowercase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_A , _A , _A , _A , _A = accelerator.prepare(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
# Now we train the model
for epoch in range(__lowercase ):
model.train()
with LocalSGD(
accelerator=__lowercase , model=__lowercase , local_sgd_steps=__lowercase , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(__lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs, nor do we advise using them, as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(__lowercase ):
_A = model(**__lowercase )
_A = output.loss
accelerator.backward(__lowercase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(__lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_A = model(**__lowercase )
_A = outputs.logits.argmax(dim=-1 )
_A , _A = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=__lowercase , references=__lowercase , )
_A = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , __lowercase )
def a__ ( ) -> Dict:
_A = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=__lowercase , default=__lowercase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
# New Code #
parser.add_argument(
"--gradient_accumulation_steps" , type=__lowercase , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , )
parser.add_argument(
"--local_sgd_steps" , type=__lowercase , default=8 , help="Number of local SGD steps or None to disable local SGD" )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
_A = parser.parse_args()
_A = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(__lowercase , __lowercase )
if __name__ == "__main__":
main() | 719 |
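# [Editor's note] Launch sketch for the LocalSGD training script above (the
# filename is a placeholder; the flags mirror the argparse definitions):
#   accelerate launch local_sgd_training.py \
#       --gradient_accumulation_steps 2 --local_sgd_steps 8 --mixed_precision fp16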
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {"vocab_file": "spiece.model"}
a_ = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
}
}
# TODO(PVP) - this should be removed in Transformers v5
a_ = {
"t5-small": 5_12,
"t5-base": 5_12,
"t5-large": 5_12,
"t5-3b": 5_12,
"t5-11b": 5_12,
}
a_ = "▁"
class snake_case ( _UpperCamelCase):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ['input_ids', 'attention_mask']
def __init__( self : List[str] , a__ : Optional[int] , a__ : Union[str, Any]="</s>" , a__ : Union[str, Any]="<unk>" , a__ : str="<pad>" , a__ : Optional[int]=1_00 , a__ : List[Any]=None , a__ : Optional[Dict[str, Any]] = None , a__ : Any=True , **a__ : Optional[int] , ) -> None:
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
_A = [F"""<extra_id_{i}>""" for i in range(a__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_A = len(set(filter(lambda a__ : bool("extra_id" in str(a__ ) ) , a__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
if legacy:
logger.warning_once(
F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"""
" read the related pull request available at https://github.com/huggingface/transformers/pull/24565" )
_A = legacy
_A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=a__ , unk_token=a__ , pad_token=a__ , extra_ids=a__ , additional_special_tokens=a__ , sp_model_kwargs=self.sp_model_kwargs , legacy=a__ , **a__ , )
_A = vocab_file
_A = extra_ids
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a__ )
@staticmethod
def a_ ( a__ : List[str] , a__ : Optional[int] , a__ : Tuple ) -> Tuple:
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
_A = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
F""" {pretrained_model_name_or_path} automatically truncating your input to"""
F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , a__ , )
return max_model_length
@property
def a_ ( self : List[Any] ) -> Dict:
'''simple docstring'''
return self.sp_model.get_piece_size() + self._extra_ids
def a_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
_A = {self.convert_ids_to_tokens(a__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a_ ( self : Optional[Any] , a__ : List[int] , a__ : Optional[List[int]] = None , a__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(a__ )) + [1]
return ([0] * len(a__ )) + [1] + ([0] * len(a__ )) + [1]
def a_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
return list(
set(filter(lambda a__ : bool(re.search(r"<extra_id_\d+>" , a__ ) ) is not None , self.additional_special_tokens ) ) )
def a_ ( self : str ) -> List[Any]:
'''simple docstring'''
return [self._convert_token_to_id(a__ ) for token in self.get_sentinel_tokens()]
def a_ ( self : List[Any] , a__ : List[int] ) -> List[int]:
'''simple docstring'''
if len(a__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
" eos tokens being added." )
return token_ids
else:
return token_ids + [self.eos_token_id]
def a_ ( self : int , a__ : List[int] , a__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_A = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def a_ ( self : Union[str, Any] , a__ : List[int] , a__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_A = self._add_eos_if_not_present(a__ )
if token_ids_a is None:
return token_ids_a
else:
_A = self._add_eos_if_not_present(a__ )
return token_ids_a + token_ids_a
def __getstate__( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
_A = self.__dict__.copy()
_A = None
return state
def __setstate__( self : int , a__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
_A = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_A = {}
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a_ ( self : int , a__ : "TextInput" , **a__ : List[str] ) -> List[str]:
'''simple docstring'''
if not self.legacy:
_A = SPIECE_UNDERLINE + text.replace(a__ , " " )
return super().tokenize(a__ , **a__ )
def a_ ( self : str , a__ : Dict , **a__ : Optional[int] ) -> Any:
'''simple docstring'''
if not self.legacy:
_A = text.startswith(a__ )
if is_first:
_A = text[1:]
_A = self.sp_model.encode(a__ , out_type=a__ )
if not self.legacy and not is_first and not text.startswith(" " ) and tokens[0].startswith(a__ ):
_A = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def a_ ( self : int , a__ : List[Any] ) -> List[str]:
'''simple docstring'''
if token.startswith("<extra_id_" ):
_A = re.match(r"<extra_id_(\d+)>" , a__ )
_A = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(a__ )
def a_ ( self : Dict , a__ : Union[str, Any] ) -> Any:
'''simple docstring'''
if index < self.sp_model.get_piece_size():
_A = self.sp_model.IdToPiece(a__ )
else:
_A = F"""<extra_id_{self.vocab_size - 1 - index}>"""
return token
def a_ ( self : Optional[int] , a__ : Tuple ) -> List[str]:
'''simple docstring'''
_A = []
_A = ""
_A = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a__ ) + token
_A = True
_A = []
else:
current_sub_tokens.append(a__ )
_A = False
out_string += self.sp_model.decode(a__ )
return out_string.strip()
def a_ ( self : Dict , a__ : str , a__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(a__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_A = os.path.join(
a__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a__ )
elif not os.path.isfile(self.vocab_file ):
with open(a__ , "wb" ) as fi:
_A = self.sp_model.serialized_model_proto()
fi.write(a__ )
return (out_vocab_file,) | 621 | 0 |
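# [Editor's sketch] What the sentinel-token arithmetic above means in
# practice, assuming the stock "t5-small" checkpoint (vocab_size 32100, with
# 100 extra ids): <extra_id_k> maps to vocab_size - k - 1.
from transformers import T5Tokenizer

tok = T5Tokenizer.from_pretrained("t5-small")
assert tok.convert_tokens_to_ids("<extra_id_0>") == 32099
assert tok.convert_ids_to_tokens(32098) == "<extra_id_1>"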
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
"configuration_bigbird_pegasus": [
"BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BigBirdPegasusConfig",
"BigBirdPegasusOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
"BigBirdPegasusForCausalLM",
"BigBirdPegasusForConditionalGeneration",
"BigBirdPegasusForQuestionAnswering",
"BigBirdPegasusForSequenceClassification",
"BigBirdPegasusModel",
"BigBirdPegasusPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 720 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def a__ ( __lowercase ) -> List[Any]:
_A = os.path.join(args.tf_model_dir , "parameters.json" )
_A = json.loads(open(__lowercase ).read() )
if not params:
raise ValueError(
f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith(".pt" ):
_A = args.output + ".pt"
_A = OrderedDict()
with tf.device("/CPU:0" ):
_A = tf.train.load_checkpoint(args.tf_model_dir )
_A = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
_A = reader.get_tensor(__lowercase ).astype(np.floataa )
if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ):
continue
if key_name.startswith("pasts/" ):
if key_name.startswith("pasts/mlp" ):
_A = int(key_name[9] )
elif key_name.startswith("pasts/out" ):
_A = 8
_A = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/moe" ):
_A = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/switch_gating/kernel" ):
_A = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.endswith("/softmlp/kernel" ):
_A = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
_A = key_name[-9:-7]
for i in range(16 ):
_A = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
_A = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/mlp" ):
_A = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/p1/kernel" ):
_A = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.endswith("/p1/bias" ):
_A = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.endswith("/p2/kernel" ):
_A = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.endswith("/p2/bias" ):
_A = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/ln" ):
_A = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
_A = "model.blocks.%d.feed_forward.norm.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.endswith("/g" ):
_A = "model.blocks.%d.feed_forward.norm.weight" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/att" ):
_A = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/qkv/kernel" ):
_A = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_A = state[:, 0, :, :]
_A = state[:, 1, :, :]
_A = state[:, 2, :, :]
_A = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_A = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_A = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_A = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
_A = torch.tensor(__lowercase )
_A = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
_A = torch.tensor(__lowercase )
_A = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
_A = torch.tensor(__lowercase )
elif key_name.endswith("/o/kernel" ):
_A = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
_A = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/an" ):
_A = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
_A = "model.blocks.%d.self_attn.norm.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.endswith("/g" ):
_A = "model.blocks.%d.self_attn.norm.weight" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif (
key_name.startswith("model/wte" )
or key_name.startswith("model/wpe" )
or key_name.startswith("model/ete" )
):
_A = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
key_name[-3:]
]
_A = "model.%s.weight" % nlayer
_A = vnp.copy() # same in embedded
_A = torch.tensor(__lowercase )
if key_name.startswith("model/wte" ):
_A = "lm_head.weight"
_A = vnp.copy() # same in embedded
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/wob" ):
_A = "final_logits_bias"
_A = vnp.copy() # same in embedded
_A = state.reshape((1, -1) )
_A = torch.tensor(__lowercase )
elif key_name == "model/dense/kernel":
_A = "model.last_project.weight"
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name == "model/dense_1/bias":
_A = "model.last_project.bias"
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
torch.save(__lowercase , args.output )
if __name__ == "__main__":
a_ = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
a_ = parser.parse_args()
convert_tf_gptsan_to_pt(args) | 621 | 0 |
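# [Editor's note] Invocation sketch for the converter above (the script name
# is a placeholder; --tf_model_dir and --output are the flags defined here):
#   python convert_gptsan_checkpoint.py --tf_model_dir /path/to/tf_ckpt --output gptsan.pt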
"""simple docstring"""
import operator as op
def a__ ( __lowercase ) -> str:
_A = []
_A = lambda __lowercase , __lowercase : int(x / y ) # noqa: E731 integer division operation
_A = {
"^": op.pow,
"*": op.mul,
"/": div,
"+": op.add,
"-": op.sub,
} # operators & their respective operation
# print table header
print("Symbol".center(8 ) , "Action".center(12 ) , "Stack" , sep=" | " )
print("-" * (30 + len(__lowercase )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(__lowercase ) # append x to stack
# output in tabular format
print(x.rjust(8 ) , ("push(" + x + ")").ljust(12 ) , ",".join(__lowercase ) , sep=" | " )
else:
_A = stack.pop() # pop stack
# output in tabular format
print("".rjust(8 ) , ("pop(" + b + ")").ljust(12 ) , ",".join(__lowercase ) , sep=" | " )
_A = stack.pop() # pop stack
# output in tabular format
print("".rjust(8 ) , ("pop(" + a + ")").ljust(12 ) , ",".join(__lowercase ) , sep=" | " )
stack.append(
str(opr[x](int(__lowercase ) , int(__lowercase ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ) , ("push(" + a + x + b + ")").ljust(12 ) , ",".join(__lowercase ) , sep=" | " , )
return int(stack[0] )
if __name__ == "__main__":
a_ = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
print("\n\tResult = ", solve(Postfix)) | 721 |
"""simple docstring"""
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
a_ = argparse.ArgumentParser(
description=(
"Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
" Distillation"
)
)
parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
parser.add_argument("--model_name", default="roberta-large", type=str)
parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
parser.add_argument("--vocab_transform", action="store_true")
a_ = parser.parse_args()
if args.model_type == "roberta":
a_ = RobertaForMaskedLM.from_pretrained(args.model_name)
a_ = "roberta"
elif args.model_type == "gpt2":
a_ = GPTaLMHeadModel.from_pretrained(args.model_name)
a_ = "transformer"
a_ = model.state_dict()
a_ = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
a_ = state_dict[f'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
a_ = f'''{prefix}.embeddings.{w}.weight'''
a_ = state_dict[param_name]
for w in ["weight", "bias"]:
a_ = f'''{prefix}.embeddings.LayerNorm.{w}'''
a_ = state_dict[param_name]
# Transformer Blocks #
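    # The six teacher layers picked below ({0, 2, 4, 7, 9, 11}) are copied into
    # consecutive student positions, so the student starts from a 6-layer stack
    # of spread-out teacher layers rather than the first six.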
a_ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
a_ = state_dict[
f'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
]
a_ = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
a_ = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
a_ = state_dict[f'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
a_ = state_dict[f'''lm_head.dense.{w}''']
a_ = state_dict[f'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
a_ = state_dict[f'''{prefix}.ln_f.{w}''']
a_ = state_dict["lm_head.weight"]
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint) | 621 | 0 |
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
a_ , a_ , a_ = False, False, False
@dataclass
class snake_case :
__UpperCamelCase = None
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = None
# Automatically constructed
__UpperCamelCase = 'dict'
__UpperCamelCase = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
__UpperCamelCase = field(default='Audio' , init=_UpperCamelCase , repr=_UpperCamelCase)
def __call__( self : int ) -> Any:
'''simple docstring'''
return self.pa_type
def a_ ( self : str , a__ : Union[str, bytes, dict] ) -> dict:
'''simple docstring'''
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
if isinstance(a__ , a__ ):
return {"bytes": None, "path": value}
elif isinstance(a__ , a__ ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
_A = BytesIO()
sf.write(a__ , value["array"] , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
                    # At a minimum, converting PCM bytes to WAV bytes requires knowing the sampling rate
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
if value.get("bytes" ):
                    # If we already have the PCM bytes, we don't need to read the file again (just use them!)
_A = np.frombuffer(value["bytes"] , dtype=np.intaa ).astype(np.floataa ) / 3_27_67
else:
_A = np.memmap(value["path"] , dtype="h" , mode="r" ).astype(np.floataa ) / 3_27_67
_A = BytesIO(bytes() )
sf.write(a__ , a__ , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
F"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
def a_ ( self : Optional[int] , a__ : dict , a__ : Optional[Dict[str, Union[str, bool, None]]] = None ) -> dict:
'''simple docstring'''
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
_A , _A = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(F"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
_A = xsplitext(a__ )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
if file is None:
_A = token_per_repo_id or {}
_A = path.split("::" )[-1]
try:
_A = string_to_dict(a__ , config.HUB_DATASETS_URL )["repo_id"]
_A = token_per_repo_id[repo_id]
except (ValueError, KeyError):
_A = None
with xopen(a__ , "rb" , use_auth_token=a__ ) as f:
_A , _A = sf.read(a__ )
else:
_A , _A = sf.read(a__ )
_A = array.T
if self.mono:
_A = librosa.to_mono(a__ )
if self.sampling_rate and self.sampling_rate != sampling_rate:
_A = librosa.resample(a__ , orig_sr=a__ , target_sr=self.sampling_rate )
_A = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def a_ ( self : Dict ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
'''simple docstring'''
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
def a_ ( self : Union[str, Any] , a__ : Union[pa.StringArray, pa.StructArray] ) -> pa.StructArray:
'''simple docstring'''
if pa.types.is_string(storage.type ):
_A = pa.array([None] * len(a__ ) , type=pa.binary() )
_A = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
_A = pa.array([None] * len(a__ ) , type=pa.string() )
_A = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
_A = pa.array([Audio().encode_example(a__ ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
_A = storage.field("bytes" )
else:
_A = pa.array([None] * len(a__ ) , type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
_A = storage.field("path" )
else:
_A = pa.array([None] * len(a__ ) , type=pa.string() )
_A = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
return array_cast(a__ , self.pa_type )
def a_ ( self : int , a__ : pa.StructArray ) -> pa.StructArray:
'''simple docstring'''
@no_op_if_value_is_null
def path_to_bytes(a__ : int ):
with xopen(a__ , "rb" ) as f:
_A = f.read()
return bytes_
_A = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
_A = pa.array(
[os.path.basename(a__ ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
_A = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
return array_cast(a__ , self.pa_type ) | 700 |
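# [Editor's sketch] Typical use of the Audio feature above on a datasets
# column; the dataset name matches the tests elsewhere in this dump, and the
# 16 kHz target exercises the resampling branch of the decode path.
from datasets import Audio, load_dataset

ds = load_dataset("ashraq/esc50", split="train")
ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
example = ds[0]["audio"]  # {"path": ..., "array": np.ndarray, "sampling_rate": 16000}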
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 621 | 0 |
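# [Editor's sketch] The import files above use transformers' lazy-module
# scheme: the module object in sys.modules is swapped for one that defers the
# heavy submodule imports until an attribute is first accessed. A stripped-down
# model of the idea (not the real _LazyModule):
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        # only called when normal lookup fails; a real implementation would
        # raise AttributeError (not KeyError) on unknown names
        submodule = self._attr_to_submodule[attr]
        value = getattr(importlib.import_module(submodule), attr)
        setattr(self, attr, value)  # cache so __getattr__ runs once per name
        return value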
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 701 |
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class snake_case ( _UpperCamelCase):
def __init__( self : Optional[int] , a__ : str=0.0_1 , a__ : str=10_00 ) -> int:
'''simple docstring'''
_A = p_stop
_A = max_length
def __iter__( self : Any ) -> Optional[Any]:
'''simple docstring'''
_A = 0
_A = False
while not stop and count < self.max_length:
yield count
count += 1
_A = random.random() < self.p_stop
class snake_case ( unittest.TestCase):
def a_ ( self : List[Any] , a__ : Union[str, Any] , a__ : Union[str, Any] , a__ : List[str]=False , a__ : str=True ) -> Union[str, Any]:
'''simple docstring'''
_A = [
BatchSamplerShard(a__ , 2 , a__ , split_batches=a__ , even_batches=a__ )
for i in range(2 )
]
_A = [list(a__ ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(a__ ) for shard in batch_sampler_shards] , [len(a__ ) for e in expected] )
self.assertListEqual(a__ , a__ )
def a_ ( self : List[Any] ) -> str:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
        # Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
        # num_processes batches.
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(a__ , a__ )
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ )
def a_ ( self : int ) -> int:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size.
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ )
def a_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=3 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
        # Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
        # num_processes batches.
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(20 ) , batch_size=3 , drop_last=a__ )
_A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[[0, 1]], []]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=3 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , even_batches=a__ )
def a_ ( self : List[str] ) -> str:
'''simple docstring'''
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(24 ) , batch_size=4 , drop_last=a__ )
# Expected shouldn't change
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size.
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(22 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(21 ) , batch_size=4 , drop_last=a__ )
_A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
# Check the shards when the dataset is very small.
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[[0, 1]], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
_A = BatchSampler(range(2 ) , batch_size=4 , drop_last=a__ )
_A = [[], []]
self.check_batch_sampler_shards(a__ , a__ , split_batches=a__ , even_batches=a__ )
def a_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
_A = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
_A = [BatchSamplerShard(a__ , 2 , a__ , even_batches=a__ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def a_ ( self : Optional[int] , a__ : Optional[int] , a__ : Tuple , a__ : Optional[int] , a__ : Union[str, Any]=False , a__ : int=2 , a__ : List[Any]=False ) -> str:
'''simple docstring'''
random.seed(a__ )
_A = list(a__ )
_A = [
IterableDatasetShard(
a__ , batch_size=a__ , drop_last=a__ , num_processes=a__ , process_index=a__ , split_batches=a__ , )
for i in range(a__ )
]
_A = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(a__ )
iterable_dataset_lists.append(list(a__ ) )
_A = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
_A = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(a__ ) , len(a__ ) )
self.assertTrue(len(a__ ) % shard_batch_size == 0 )
_A = []
for idx in range(0 , len(a__ ) , a__ ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(a__ ) < len(a__ ):
reference += reference
self.assertListEqual(a__ , reference[: len(a__ )] )
def a_ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
_A = 42
_A = RandomIterableDataset()
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
# Edge case with a very small dataset
_A = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
self.check_iterable_dataset_shards(a__ , a__ , batch_size=4 , drop_last=a__ , split_batches=a__ )
def a_ ( self : List[str] ) -> Dict:
'''simple docstring'''
_A = BatchSampler(range(16 ) , batch_size=4 , drop_last=a__ )
_A = SkipBatchSampler(a__ , 2 )
self.assertListEqual(list(a__ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a_ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_A = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a_ ( self : int ) -> Optional[int]:
'''simple docstring'''
_A = DataLoader(list(range(16 ) ) , batch_size=4 )
_A = skip_first_batches(a__ , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a_ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_A = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def a_ ( self : int ) -> int:
'''simple docstring'''
Accelerator()
_A = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(a__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) | 621 | 0 |
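# A minimal resume-from-checkpoint sketch of the helper exercised above
# (assuming `skip_first_batches` is importable from `accelerate.data_loader`,
# as in recent accelerate releases):
from torch.utils.data import DataLoader
from accelerate.data_loader import skip_first_batches

loader = DataLoader(list(range(16)), batch_size=4)
resumed = skip_first_batches(loader, num_batches=2)
for batch in resumed:
    print(batch.tolist())  # first printed batch is [8, 9, 10, 11]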
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
a_ = logging.get_logger(__name__)
a_ = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
),
}
class DebertaV2Config( PretrainedConfig):
__UpperCamelCase = 'deberta-v2'
def __init__( self : Dict , a__ : Dict=12_81_00 , a__ : Any=15_36 , a__ : List[Any]=24 , a__ : Optional[Any]=24 , a__ : Optional[int]=61_44 , a__ : Union[str, Any]="gelu" , a__ : Optional[Any]=0.1 , a__ : List[Any]=0.1 , a__ : Optional[Any]=5_12 , a__ : Optional[Any]=0 , a__ : int=0.0_2 , a__ : Optional[int]=1E-7 , a__ : Union[str, Any]=False , a__ : Union[str, Any]=-1 , a__ : Union[str, Any]=0 , a__ : Tuple=True , a__ : Union[str, Any]=None , a__ : Dict=0 , a__ : int="gelu" , **a__ : Optional[Any] , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**a__ )
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = initializer_range
_A = relative_attention
_A = max_relative_positions
_A = pad_token_id
_A = position_biased_input
# Backwards compatibility
if type(a__ ) == str:
_A = [x.strip() for x in pos_att_type.lower().split("|" )]
_A = pos_att_type
_A = vocab_size
_A = layer_norm_eps
_A = kwargs.get("pooler_hidden_size" , a__ )
_A = pooler_dropout
_A = pooler_hidden_act
class DebertaV2OnnxConfig( OnnxConfig):
@property
def a_ ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
_A = {0: "batch", 1: "choice", 2: "sequence"}
else:
_A = {0: "batch", 1: "sequence"}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)] )
else:
return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)] )
@property
def a_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
return 12
def a_ ( self : Any , a__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , a__ : int = -1 , a__ : int = -1 , a__ : int = -1 , a__ : bool = False , a__ : Optional["TensorType"] = None , a__ : int = 3 , a__ : int = 40 , a__ : int = 40 , a__ : "PreTrainedTokenizerBase" = None , ) -> Mapping[str, Any]:
'''simple docstring'''
_A = super().generate_dummy_inputs(preprocessor=a__ , framework=a__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs | 702 |
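# A short usage sketch of the configuration above (it is exposed as
# `DebertaV2Config` in transformers; the sizes below are illustrative):
from transformers import DebertaV2Config

config = DebertaV2Config(hidden_size=768, num_hidden_layers=12, num_attention_heads=12)
print(config.model_type)  # "deberta-v2"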
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
a_ = False
class snake_case ( unittest.TestCase):
pass
@nightly
@require_torch_gpu
class snake_case ( unittest.TestCase):
def a_ ( self : Optional[int] ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : Tuple ) -> Any:
'''simple docstring'''
_A = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
_A = torch.manual_seed(0 )
_A = pipe.dual_guided(
prompt="first prompt" , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a__ )
_A = VersatileDiffusionPipeline.from_pretrained(a__ , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_A = generator.manual_seed(0 )
_A = pipe.dual_guided(
prompt="first prompt" , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def a_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
_A = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_A = "cyberpunk 2077"
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
_A = torch.manual_seed(0 )
_A = pipe.dual_guided(
prompt=a__ , image=a__ , text_to_image_strength=0.7_5 , generator=a__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
_A = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_A = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_A = "A painting of a squirrel eating a burger "
_A = torch.manual_seed(0 )
_A = pipe.text_to_image(
prompt=a__ , generator=a__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
_A = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_A = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_A = pipe.image_variation(a__ , generator=a__ , output_type="numpy" ).images
_A = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_A = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 | 621 | 0 |
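# A minimal text-to-image sketch mirroring the nightly test above (requires a
# GPU and downloading the "shi-labs/versatile-diffusion" weights):
import torch
from diffusers import VersatileDiffusionPipeline

pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
pipe.to("cuda")
image = pipe.text_to_image(prompt="A painting of a squirrel eating a burger", num_inference_steps=50).images[0]
image.save("squirrel.png")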
"""simple docstring"""
def factorial(digit ) -> int:
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def krishnamurthy(number ) -> bool:
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate , 10 )
        fact_sum += factorial(digit )
    return fact_sum == number
if __name__ == "__main__":
print("Program to check whether a number is a Krisnamurthy Number or not.")
a_ = int(input("Enter number: ").strip())
print(
f'''{number} is {"" if krishnamurthy(number) else "not "}a Krishnamurthy Number.'''
) | 703 |
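# Quick sanity check for the predicate above: 145 is the classic
# Krishnamurthy number, since 1! + 4! + 5! = 1 + 24 + 120 = 145.
assert krishnamurthy(145)
assert not krishnamurthy(146)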
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
a_ = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
__UpperCamelCase = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys())})
__UpperCamelCase = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'})
__UpperCamelCase = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__UpperCamelCase = field(
default=_UpperCamelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'})
def a_ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
_A = self.task_name.lower()
class Split( Enum):
__UpperCamelCase = 'train'
__UpperCamelCase = 'dev'
__UpperCamelCase = 'test'
class GlueDataset( Dataset):
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
def __init__( self : Optional[int] , a__ : GlueDataTrainingArguments , a__ : PreTrainedTokenizerBase , a__ : Optional[int] = None , a__ : Union[str, Split] = Split.train , a__ : Optional[str] = None , ) -> Tuple:
'''simple docstring'''
warnings.warn(
"This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
"library. You can have a look at this example script for pointers: "
"https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" , a__ , )
_A = args
_A = glue_processors[args.task_name]()
_A = glue_output_modes[args.task_name]
if isinstance(a__ , a__ ):
try:
_A = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
# Load data features from cache or dataset file
_A = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
_A = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_A , _A = label_list[2], label_list[1]
_A = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_A = cached_features_file + ".lock"
with FileLock(a__ ):
if os.path.exists(a__ ) and not args.overwrite_cache:
_A = time.time()
_A = torch.load(a__ )
logger.info(
F"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
else:
logger.info(F"""Creating features from dataset file at {args.data_dir}""" )
if mode == Split.dev:
_A = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
_A = self.processor.get_test_examples(args.data_dir )
else:
_A = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
_A = examples[:limit_length]
_A = glue_convert_examples_to_features(
a__ , a__ , max_length=args.max_seq_length , label_list=a__ , output_mode=self.output_mode , )
_A = time.time()
torch.save(self.features , a__ )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self : List[Any] ) -> Any:
'''simple docstring'''
return len(self.features )
def __getitem__( self : Tuple , a__ : Union[str, Any] ) -> InputFeatures:
'''simple docstring'''
return self.features[i]
def a_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
return self.label_list | 621 | 0 |
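# A minimal sketch of the (deprecated) dataset wrapper above, assuming the
# `GlueDataset` / `GlueDataTrainingArguments` re-exports from transformers and
# a locally downloaded MRPC data folder:
from transformers import AutoTokenizer, GlueDataset, GlueDataTrainingArguments

data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC", max_seq_length=128)
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="train")
print(len(train_dataset))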
"""simple docstring"""
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp( tf.keras.optimizers.schedules.LearningRateSchedule):
def __init__( self : Tuple , a__ : float , a__ : Callable , a__ : int , a__ : float = 1.0 , a__ : str = None , ) -> Optional[int]:
'''simple docstring'''
super().__init__()
_A = initial_learning_rate
_A = warmup_steps
_A = power
_A = decay_schedule_fn
_A = name
def __call__( self : Optional[Any] , a__ : int ) -> Dict:
'''simple docstring'''
with tf.name_scope(self.name or "WarmUp" ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
_A = tf.cast(a__ , tf.floataa )
_A = tf.cast(self.warmup_steps , tf.floataa )
_A = global_step_float / warmup_steps_float
_A = self.initial_learning_rate * tf.math.pow(a__ , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=a__ , )
def a_ ( self : Tuple ) -> Any:
'''simple docstring'''
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
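# A small usage sketch of the warmup schedule defined above (assuming the
# class is exposed under its upstream name `WarmUp`; constants illustrative):
linear_decay = tf.keras.optimizers.schedules.PolynomialDecay(
    initial_learning_rate=3e-5, decay_steps=9_500, end_learning_rate=0.0
)
warmup_then_decay = WarmUp(
    initial_learning_rate=3e-5, decay_schedule_fn=linear_decay, warmup_steps=500
)
print(float(warmup_then_decay(250)))  # mid-warmup: 0.5 * 3e-5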
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase = 0.0 , __lowercase = 0.9 , __lowercase = 0.999 , __lowercase = 1E-8 , __lowercase = None , __lowercase = None , __lowercase = 0.0 , __lowercase = 1.0 , __lowercase = None , ) -> Union[str, Any]:
_A = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=__lowercase , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=__lowercase , )
if num_warmup_steps:
_A = WarmUp(
initial_learning_rate=__lowercase , decay_schedule_fn=__lowercase , warmup_steps=__lowercase , )
if weight_decay_rate > 0.0:
_A = AdamWeightDecay(
learning_rate=__lowercase , weight_decay_rate=__lowercase , beta_a=__lowercase , beta_a=__lowercase , epsilon=__lowercase , clipnorm=__lowercase , global_clipnorm=__lowercase , exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"] , include_in_weight_decay=__lowercase , )
else:
_A = tf.keras.optimizers.Adam(
learning_rate=__lowercase , beta_a=__lowercase , beta_a=__lowercase , epsilon=__lowercase , clipnorm=__lowercase , global_clipnorm=__lowercase , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class AdamWeightDecay( Adam):
def __init__( self : Optional[int] , a__ : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.0_0_1 , a__ : float = 0.9 , a__ : float = 0.9_9_9 , a__ : float = 1E-7 , a__ : bool = False , a__ : float = 0.0 , a__ : Optional[List[str]] = None , a__ : Optional[List[str]] = None , a__ : str = "AdamWeightDecay" , **a__ : Union[str, Any] , ) -> str:
'''simple docstring'''
super().__init__(a__ , a__ , a__ , a__ , a__ , a__ , **a__ )
_A = weight_decay_rate
_A = include_in_weight_decay
_A = exclude_from_weight_decay
@classmethod
def a_ ( cls : int , a__ : Dict ) -> int:
'''simple docstring'''
_A = {"WarmUp": WarmUp}
return super(a__ , cls ).from_config(a__ , custom_objects=a__ )
def a_ ( self : Any , a__ : Tuple , a__ : Dict , a__ : Any ) -> Optional[Any]:
'''simple docstring'''
super(a__ , self )._prepare_local(a__ , a__ , a__ )
_A = tf.constant(
self.weight_decay_rate , name="adam_weight_decay_rate" )
def a_ ( self : int , a__ : Optional[Any] , a__ : List[str] , a__ : str ) -> str:
'''simple docstring'''
_A = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"] , use_locking=self._use_locking , )
return tf.no_op()
def a_ ( self : Optional[Any] , a__ : Dict , a__ : Any=None , **a__ : List[str] ) -> Tuple:
'''simple docstring'''
_A , _A = list(zip(*a__ ) )
return super(a__ , self ).apply_gradients(zip(a__ , a__ ) , name=a__ , **a__ )
def a_ ( self : Optional[int] , a__ : Dict , a__ : Tuple , a__ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
_A = apply_state or {}
_A = apply_state.get((var_device, var_dtype) )
if coefficients is None:
_A = self._fallback_apply_state(a__ , a__ )
_A = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def a_ ( self : List[str] , a__ : Union[str, Any] , a__ : Optional[Any] , a__ : Optional[int]=None ) -> Tuple:
'''simple docstring'''
_A , _A = self._get_lr(var.device , var.dtype.base_dtype , a__ )
_A = self._decay_weights_op(a__ , a__ , a__ )
with tf.control_dependencies([decay] ):
return super(a__ , self )._resource_apply_dense(a__ , a__ , **a__ )
def a_ ( self : List[Any] , a__ : List[Any] , a__ : Union[str, Any] , a__ : Dict , a__ : List[Any]=None ) -> Optional[int]:
'''simple docstring'''
_A , _A = self._get_lr(var.device , var.dtype.base_dtype , a__ )
_A = self._decay_weights_op(a__ , a__ , a__ )
with tf.control_dependencies([decay] ):
return super(a__ , self )._resource_apply_sparse(a__ , a__ , a__ , **a__ )
def a_ ( self : Optional[int] ) -> int:
'''simple docstring'''
_A = super().get_config()
config.update({"weight_decay_rate": self.weight_decay_rate} )
return config
def a_ ( self : Any , a__ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(a__ , a__ ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(a__ , a__ ) is not None:
return False
return True
class GradientAccumulator(object):
def __init__( self : Any ) -> List[Any]:
'''simple docstring'''
_A = []
_A = None
@property
def a_ ( self : str ) -> List[Any]:
'''simple docstring'''
if self._accum_steps is None:
_A = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=a__ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def a_ ( self : List[Any] ) -> int:
'''simple docstring'''
if not self._gradients:
raise ValueError("The accumulator should be called first to initialize the gradients" )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self : Tuple , a__ : Optional[Any] ) -> int:
'''simple docstring'''
if not self._gradients:
_A = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(a__ ) , trainable=a__ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(a__ ) != len(self._gradients ):
raise ValueError(F"""Expected {len(self._gradients )} gradients, but got {len(a__ )}""" )
for accum_gradient, gradient in zip(self._gradients , a__ ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(a__ )
self._accum_steps.assign_add(1 )
def a_ ( self : str ) -> int:
'''simple docstring'''
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(a__ ) ) | 704 |
"""simple docstring"""
def bfs(graph , source , sink , parent ) -> bool:
    # Returns True if the sink is reachable from the source in the residual
    # graph, filling `parent` with the discovered augmenting path.
    visited = [False] * len(graph )
    queue = []
    queue.append(source )
    visited[source] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[sink]
def ford_fulkerson(graph , source , sink ) -> int:
    # This array is filled by BFS to store the augmenting path
    parent = [-1] * (len(graph ))
    max_flow = 0
    while bfs(graph , source , sink , parent ):
        path_flow = float("Inf" )
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow , graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities of the edges and reverse edges
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source , sink = 0, 5
print(ford_fulkerson(graph, source, sink)) | 621 | 0 |
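# Self-check: the capacity matrix above is the classic CLRS example network,
# whose maximum s-t flow is 23 (ford_fulkerson mutates the matrix it is
# given, turning it into the residual graph, hence the fresh literal):
assert ford_fulkerson(
    [
        [0, 16, 13, 0, 0, 0],
        [0, 0, 10, 12, 0, 0],
        [0, 4, 0, 0, 14, 0],
        [0, 0, 9, 0, 0, 20],
        [0, 0, 0, 7, 0, 4],
        [0, 0, 0, 0, 0, 0],
    ],
    0,
    5,
) == 23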
"""simple docstring"""
def equation(x ) -> float:
    return 10 - x * x
def bisection(a , b ) -> float:
    # Bolzano's theorem: a root can only be bracketed in [a, b] if
    # equation(a) and equation(b) differ in sign
    if equation(a ) * equation(b ) >= 0:
        raise ValueError("Wrong space!" )
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c ) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c ) * equation(a ) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
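    # Both calls bracket the positive root of 10 - x**2, so each printed value
    # lies within the 0.01 stopping tolerance of sqrt(10) ≈ 3.1623.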
| 705 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
a_ = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def rename_key(state_dict , old , new ) -> None:
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys(state_dict ) -> OrderedDict:
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict , is_panoptic=False ) -> None:
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""encoder.layers.{i}.self_attn.q_proj.weight"""] = in_proj_weight[:256, :]
        state_dict[f"""encoder.layers.{i}.self_attn.q_proj.bias"""] = in_proj_bias[:256]
        state_dict[f"""encoder.layers.{i}.self_attn.k_proj.weight"""] = in_proj_weight[256:512, :]
        state_dict[f"""encoder.layers.{i}.self_attn.k_proj.bias"""] = in_proj_bias[256:512]
        state_dict[f"""encoder.layers.{i}.self_attn.v_proj.weight"""] = in_proj_weight[-256:, :]
        state_dict[f"""encoder.layers.{i}.self_attn.v_proj.bias"""] = in_proj_bias[-256:]
def prepare_img() -> Image.Image:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def a__ ( __lowercase , __lowercase ) -> Any:
_A = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
_A = "resnet101"
if "dc5" in model_name:
_A = True
_A = "panoptic" in model_name
if is_panoptic:
_A = 250
else:
_A = 91
_A = "huggingface/label-files"
_A = "coco-detection-id2label.json"
_A = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) )
_A = {int(__lowercase ): v for k, v in idalabel.items()}
_A = idalabel
_A = {v: k for k, v in idalabel.items()}
# load image processor
_A = "coco_panoptic" if is_panoptic else "coco_detection"
_A = ConditionalDetrImageProcessor(format=__lowercase )
# prepare image
_A = prepare_img()
_A = image_processor(images=__lowercase , return_tensors="pt" )
_A = encoding["pixel_values"]
logger.info(f"""Converting model {model_name}...""" )
# load original model from torch hub
_A = torch.hub.load("DeppMeng/ConditionalDETR" , __lowercase , pretrained=__lowercase ).eval()
_A = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
_A = "conditional_detr." + src
rename_key(__lowercase , __lowercase , __lowercase )
_A = rename_backbone_keys(__lowercase )
# query, key and value matrices need special treatment
read_in_q_k_v(__lowercase , is_panoptic=__lowercase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_A = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
_A = state_dict.pop(__lowercase )
_A = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_A = state_dict.pop(__lowercase )
_A = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
_A = state_dict.pop(__lowercase )
_A = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
_A = state_dict.pop(__lowercase )
_A = val
# finally, create HuggingFace model and load state dict
_A = ConditionalDetrForSegmentation(__lowercase ) if is_panoptic else ConditionalDetrForObjectDetection(__lowercase )
model.load_state_dict(__lowercase )
model.eval()
model.push_to_hub(repo_id=__lowercase , organization="DepuMeng" , commit_message="Add model" )
# verify our conversion
_A = conditional_detr(__lowercase )
_A = model(__lowercase )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(__lowercase ).mkdir(exist_ok=__lowercase )
model.save_pretrained(__lowercase )
image_processor.save_pretrained(__lowercase )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
a_ = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path) | 621 | 0 |
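# Typical invocation of the conversion script above (the script filename is
# an assumption; paths are placeholders):
#
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50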
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor( ProcessorMixin):
__UpperCamelCase = ['image_processor', 'tokenizer']
__UpperCamelCase = 'BridgeTowerImageProcessor'
__UpperCamelCase = ('RobertaTokenizer', 'RobertaTokenizerFast')
def __init__( self : str , a__ : List[Any] , a__ : str ) -> Dict:
'''simple docstring'''
super().__init__(a__ , a__ )
def __call__( self : Tuple , a__ : Optional[int] , a__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a__ : bool = True , a__ : Union[bool, str, PaddingStrategy] = False , a__ : Union[bool, str, TruncationStrategy] = None , a__ : Optional[int] = None , a__ : int = 0 , a__ : Optional[int] = None , a__ : Optional[bool] = None , a__ : Optional[bool] = None , a__ : bool = False , a__ : bool = False , a__ : bool = False , a__ : bool = False , a__ : bool = True , a__ : Optional[Union[str, TensorType]] = None , **a__ : str , ) -> BatchEncoding:
'''simple docstring'''
_A = self.tokenizer(
text=a__ , add_special_tokens=a__ , padding=a__ , truncation=a__ , max_length=a__ , stride=a__ , pad_to_multiple_of=a__ , return_token_type_ids=a__ , return_attention_mask=a__ , return_overflowing_tokens=a__ , return_special_tokens_mask=a__ , return_offsets_mapping=a__ , return_length=a__ , verbose=a__ , return_tensors=a__ , **a__ , )
# add pixel_values + pixel_mask
_A = self.image_processor(
a__ , return_tensors=a__ , do_normalize=a__ , do_center_crop=a__ , **a__ )
encoding.update(a__ )
return encoding
def a_ ( self : List[Any] , *a__ : Dict , **a__ : str ) -> Tuple:
'''simple docstring'''
return self.tokenizer.batch_decode(*a__ , **a__ )
def a_ ( self : Optional[int] , *a__ : Dict , **a__ : List[str] ) -> str:
'''simple docstring'''
return self.tokenizer.decode(*a__ , **a__ )
@property
def a_ ( self : List[Any] ) -> Any:
'''simple docstring'''
_A = self.tokenizer.model_input_names
_A = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
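# A short usage sketch of the combined processor above (assuming it is
# exposed as `BridgeTowerProcessor` with the public checkpoint below):
from transformers import BridgeTowerProcessor
from PIL import Image
import numpy as np

processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
encoding = processor(image, "a photo of two cats", return_tensors="pt")
print(list(encoding.keys()))  # input_ids, attention_mask, pixel_values, pixel_mask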
| 706 |
"""simple docstring"""
import random
def partition(a , left_index , right_index ) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1 , right_index ):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
    return i - 1
def quick_sort_random(a , left , right ) -> None:
    if left < right:
        pivot = random.randint(left , right - 1 )
        a[left], a[pivot] = (
            a[pivot],
            a[left],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a , left , right )
        quick_sort_random(
            a , left , pivot_index )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a , pivot_index + 1 , right )  # recursive quicksort to the right of the pivot point
def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n" ).strip()
    arr = [int(item ) for item in user_input.split("," )]
    quick_sort_random(arr , 0 , len(arr ) )
    print(arr )
if __name__ == "__main__":
main() | 621 | 0 |
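# Quick property check for the randomized quicksort above (self-contained):
import random
data = random.sample(range(100), 20)
quick_sort_random(data, 0, len(data))
assert data == sorted(data)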
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
a_ = logging.get_logger(__name__)
class WhisperFeatureExtractor( SequenceFeatureExtractor):
__UpperCamelCase = ['input_features']
def __init__( self : int , a__ : Optional[Any]=80 , a__ : Optional[int]=1_60_00 , a__ : int=1_60 , a__ : Union[str, Any]=30 , a__ : Tuple=4_00 , a__ : List[Any]=0.0 , a__ : Optional[Any]=False , **a__ : List[Any] , ) -> str:
'''simple docstring'''
super().__init__(
feature_size=a__ , sampling_rate=a__ , padding_value=a__ , return_attention_mask=a__ , **a__ , )
_A = n_fft
_A = hop_length
_A = chunk_length
_A = chunk_length * sampling_rate
_A = self.n_samples // hop_length
_A = sampling_rate
_A = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=a__ , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=a__ , norm="slaney" , mel_scale="slaney" , )
def a_ ( self : int , a__ : np.array ) -> np.ndarray:
'''simple docstring'''
_A = spectrogram(
a__ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , )
_A = log_spec[:, :-1]
_A = np.maximum(a__ , log_spec.max() - 8.0 )
_A = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def a_ ( a__ : List[np.ndarray] , a__ : List[np.ndarray] , a__ : float = 0.0 ) -> List[np.ndarray]:
'''simple docstring'''
if attention_mask is not None:
_A = np.array(a__ , np.intaa )
_A = []
for vector, length in zip(a__ , attention_mask.sum(-1 ) ):
_A = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
_A = padding_value
normed_input_values.append(a__ )
else:
_A = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self : Optional[int] , a__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a__ : bool = True , a__ : Optional[int] = None , a__ : Optional[Union[str, TensorType]] = None , a__ : Optional[bool] = None , a__ : Optional[str] = "max_length" , a__ : Optional[int] = None , a__ : Optional[int] = None , a__ : Optional[bool] = None , **a__ : Dict , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_A = isinstance(a__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
_A = is_batched_numpy or (
isinstance(a__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_A = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(a__ , np.ndarray ):
_A = np.asarray(a__ , dtype=np.floataa )
elif isinstance(a__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_A = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_A = [np.asarray([raw_speech] ).T]
_A = BatchFeature({"input_features": raw_speech} )
# convert into correct format for padding
_A = self.pad(
a__ , padding=a__ , max_length=max_length if max_length else self.n_samples , truncation=a__ , pad_to_multiple_of=a__ , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
_A = self.zero_mean_unit_var_norm(
padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
_A = np.stack(padded_inputs["input_features"] , axis=0 )
# make sure list is in array format
_A = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 )
_A = [self._np_extract_fbank_features(a__ ) for waveform in input_features[0]]
if isinstance(input_features[0] , a__ ):
_A = [np.asarray(a__ , dtype=np.floataa ) for feature in input_features]
else:
_A = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
_A = padded_inputs["attention_mask"][:, :: self.hop_length]
if return_tensors is not None:
_A = padded_inputs.convert_to_tensors(a__ )
return padded_inputs
def a_ ( self : Dict ) -> Dict[str, Any]:
'''simple docstring'''
_A = copy.deepcopy(self.__dict__ )
_A = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output | 707 |
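# A minimal feature-extraction sketch (assuming the class above is exposed as
# `WhisperFeatureExtractor`, matching transformers):
import numpy as np
from transformers import WhisperFeatureExtractor

extractor = WhisperFeatureExtractor()
audio = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
inputs = extractor(audio, sampling_rate=16_000, return_tensors="np")
print(inputs["input_features"].shape)  # (1, 80, 3000): 80 mel bins x 30 s of frames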
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
a_ = logging.get_logger(__name__)
class WhisperFeatureExtractor( SequenceFeatureExtractor):
__UpperCamelCase = ['input_features']
def __init__( self : int , a__ : Optional[Any]=80 , a__ : Optional[int]=1_60_00 , a__ : int=1_60 , a__ : Union[str, Any]=30 , a__ : Tuple=4_00 , a__ : List[Any]=0.0 , a__ : Optional[Any]=False , **a__ : List[Any] , ) -> str:
'''simple docstring'''
super().__init__(
feature_size=a__ , sampling_rate=a__ , padding_value=a__ , return_attention_mask=a__ , **a__ , )
_A = n_fft
_A = hop_length
_A = chunk_length
_A = chunk_length * sampling_rate
_A = self.n_samples // hop_length
_A = sampling_rate
_A = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=a__ , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=a__ , norm="slaney" , mel_scale="slaney" , )
def a_ ( self : int , a__ : np.array ) -> np.ndarray:
'''simple docstring'''
_A = spectrogram(
a__ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , )
_A = log_spec[:, :-1]
_A = np.maximum(a__ , log_spec.max() - 8.0 )
_A = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def a_ ( a__ : List[np.ndarray] , a__ : List[np.ndarray] , a__ : float = 0.0 ) -> List[np.ndarray]:
'''simple docstring'''
if attention_mask is not None:
_A = np.array(a__ , np.intaa )
_A = []
for vector, length in zip(a__ , attention_mask.sum(-1 ) ):
_A = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
_A = padding_value
normed_input_values.append(a__ )
else:
_A = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self : Optional[int] , a__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a__ : bool = True , a__ : Optional[int] = None , a__ : Optional[Union[str, TensorType]] = None , a__ : Optional[bool] = None , a__ : Optional[str] = "max_length" , a__ : Optional[int] = None , a__ : Optional[int] = None , a__ : Optional[bool] = None , **a__ : Dict , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_A = isinstance(a__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
_A = is_batched_numpy or (
isinstance(a__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_A = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(a__ , np.ndarray ):
_A = np.asarray(a__ , dtype=np.floataa )
elif isinstance(a__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_A = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_A = [np.asarray([raw_speech] ).T]
_A = BatchFeature({"input_features": raw_speech} )
# convert into correct format for padding
_A = self.pad(
a__ , padding=a__ , max_length=max_length if max_length else self.n_samples , truncation=a__ , pad_to_multiple_of=a__ , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
_A = self.zero_mean_unit_var_norm(
padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
_A = np.stack(padded_inputs["input_features"] , axis=0 )
# make sure list is in array format
_A = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 )
_A = [self._np_extract_fbank_features(a__ ) for waveform in input_features[0]]
if isinstance(input_features[0] , a__ ):
_A = [np.asarray(a__ , dtype=np.floataa ) for feature in input_features]
else:
_A = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
_A = padded_inputs["attention_mask"][:, :: self.hop_length]
if return_tensors is not None:
_A = padded_inputs.convert_to_tensors(a__ )
return padded_inputs
def a_ ( self : Dict ) -> Dict[str, Any]:
'''simple docstring'''
_A = copy.deepcopy(self.__dict__ )
_A = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output | 621 | 0 |
"""simple docstring"""
class Graph:
    def __init__( self ) -> None:
        '''simple docstring'''
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}
    def add_vertex( self , vertex ) -> None:
        '''simple docstring'''
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1
    def add_edge( self , head , tail , weight ) -> None:
        '''simple docstring'''
        self.add_vertex(head )
        self.add_vertex(tail )
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight( self ) -> None:
        '''simple docstring'''
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight) )
        for i in range(len(edges ) ):
            edges[i] = list(edges[i] )
        edges.sort(key=lambda e : e[2] )
        for i in range(len(edges ) - 1 ):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
    def __str__( self ) -> str:
        '''simple docstring'''
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += F"""{head} -> {tail} == {weight}\n"""
        return string.rstrip("\n" )
    def get_edges( self ) -> list:
        '''simple docstring'''
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]) )
        return output
    def get_vertices( self ):
        '''simple docstring'''
        return self.adjacency.keys()
    @staticmethod
    def build( vertices=None , edges=None ) -> "Graph":
        '''simple docstring'''
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex )
        for edge in edges:
            g.add_edge(*edge )
        return g
    class UnionFind:
        def __init__( self ) -> None:
            '''simple docstring'''
            self.parent = {}
            self.rank = {}
        def __len__( self ) -> int:
            '''simple docstring'''
            return len(self.parent )
        def make_set( self , item ):
            '''simple docstring'''
            if item in self.parent:
                return self.find(item )
            self.parent[item] = item
            self.rank[item] = 0
            return item
        def find( self , item ):
            '''simple docstring'''
            if item not in self.parent:
                return self.make_set(item )
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item] )
            return self.parent[item]
        def union( self , itema , itemb ):
            '''simple docstring'''
            roota = self.find(itema )
            rootb = self.find(itemb )
            if roota == rootb:
                return roota
            if self.rank[roota] > self.rank[rootb]:
                self.parent[rootb] = roota
                return roota
            if self.rank[roota] < self.rank[rootb]:
                self.parent[roota] = rootb
                return rootb
            if self.rank[roota] == self.rank[rootb]:
                self.rank[roota] += 1
                self.parent[rootb] = roota
                return roota
            return None
@staticmethod
    def boruvka( graph ) -> "Graph":
        '''simple docstring'''
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight) )
            for edge in edges:
                head, tail, weight = edge
                seta = union_find.find(head )
                setb = union_find.find(tail )
                if seta != setb:
                    if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
                        cheap_edge[seta] = [head, tail, weight]
                    if cheap_edge[setb] == -1 or cheap_edge[setb][2] > weight:
                        cheap_edge[setb] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head ) != union_find.find(tail ):
                        union_find.union(head , tail )
                        mst_edges.append(cheap_edge[vertex] )
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges )
        return mst
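# A tiny end-to-end check of Borůvka's algorithm, using the method names
# reconstructed above:
g = Graph.build(
    vertices=[0, 1, 2, 3],
    edges=[(0, 1, 1), (0, 2, 2), (2, 3, 3), (0, 3, 4)],
)
g.distinct_weight()  # the algorithm assumes distinct edge weights
mst = Graph.boruvka(g)
print(mst)  # three edges connecting all four vertices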
| 708 |
"""simple docstring"""
from __future__ import annotations
def median_of_two_arrays(numsa , numsb ) -> float:
    all_numbers = sorted(numsa + numsb )
    div, mod = divmod(len(all_numbers ) , 2 )
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    array_a = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_b = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f'''The median of two arrays is: {median_of_two_arrays(array_a, array_b)}''') | 621 | 0 |
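# Quick self-checks for the helper above:
assert median_of_two_arrays([1, 3], [2]) == 2
assert median_of_two_arrays([1, 2], [3, 4]) == 2.5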
"""simple docstring"""
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
a_ = argparse.ArgumentParser(
description=(
"Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
" Distillation"
)
)
parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
parser.add_argument("--model_name", default="roberta-large", type=str)
parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
parser.add_argument("--vocab_transform", action="store_true")
a_ = parser.parse_args()
    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"
    state_dict = model.state_dict()
    compressed_sd = {}
    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f'''{prefix}.{param_name}'''] = state_dict[f'''{prefix}.{param_name}''']
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f'''{prefix}.embeddings.{w}.weight'''
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f'''{prefix}.embeddings.LayerNorm.{w}'''
            compressed_sd[param_name] = state_dict[param_name]
# Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f'''{prefix}.h.{std_idx}.{layer}.{w}'''] = state_dict[
                        f'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
                    ]
            compressed_sd[f'''{prefix}.h.{std_idx}.attn.bias'''] = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias''']
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f'''{prefix}.encoder.layer.{std_idx}.{layer}.{w}'''] = state_dict[
                        f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
                    ]
        std_idx += 1
    # Language Modeling Head #
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f'''{layer}'''] = state_dict[f'''{layer}''']
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f'''lm_head.dense.{w}'''] = state_dict[f'''lm_head.dense.{w}''']
                compressed_sd[f'''lm_head.layer_norm.{w}'''] = state_dict[f'''lm_head.layer_norm.{w}''']
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f'''{prefix}.ln_f.{w}'''] = state_dict[f'''{prefix}.ln_f.{w}''']
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint) | 709 |
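The loop above copies every other teacher block into consecutively numbered student keys. A toy, self-contained sketch of that mapping (key names and tensor shapes are illustrative; requires torch):

import torch

teacher_sd = {f"encoder.layer.{i}.weight": torch.randn(2, 2) for i in range(12)}
student_sd = {}
for std_idx, teacher_idx in enumerate([0, 2, 4, 7, 9, 11]):
    # student layer std_idx inherits the weights of teacher layer teacher_idx
    student_sd[f"encoder.layer.{std_idx}.weight"] = teacher_sd[f"encoder.layer.{teacher_idx}.weight"]
assert len(student_sd) == 6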
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
"Salesforce/blip-vqa-capfit-large": (
"https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-base": (
"https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-large": (
"https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
),
"Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
"Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
"Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
"Salesforce/blip-itm-large-flikr": (
"https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
),
}
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'blip_text_model'
def __init__( self : int , a__ : List[str]=3_05_24 , a__ : List[str]=7_68 , a__ : List[Any]=7_68 , a__ : int=30_72 , a__ : List[str]=7_68 , a__ : Dict=12 , a__ : Optional[int]=8 , a__ : Optional[Any]=5_12 , a__ : List[Any]="gelu" , a__ : Optional[Any]=1E-1_2 , a__ : Any=0.0 , a__ : int=0.0 , a__ : Dict=0.0_2 , a__ : Optional[Any]=3_05_22 , a__ : Any=2 , a__ : int=0 , a__ : Union[str, Any]=1_02 , a__ : Tuple=True , a__ : Optional[int]=True , **a__ : Any , ) -> List[Any]:
'''simple docstring'''
super().__init__(
pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , sep_token_id=a__ , **a__ , )
_A = vocab_size
_A = hidden_size
_A = encoder_hidden_size
_A = intermediate_size
_A = projection_dim
_A = hidden_dropout_prob
_A = num_hidden_layers
_A = num_attention_heads
_A = max_position_embeddings
_A = layer_norm_eps
_A = hidden_act
_A = initializer_range
_A = attention_probs_dropout_prob
_A = is_decoder
_A = use_cache
@classmethod
def a_ ( cls : Optional[Any] , a__ : Union[str, os.PathLike] , **a__ : Optional[Any] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(a__ )
_A , _A = cls.get_config_dict(a__ , **a__ )
# get the text config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
_A = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(a__ , **a__ )
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'blip_vision_model'
def __init__( self : Optional[Any] , a__ : Any=7_68 , a__ : List[str]=30_72 , a__ : str=5_12 , a__ : Any=12 , a__ : int=12 , a__ : int=3_84 , a__ : Tuple=16 , a__ : str="gelu" , a__ : Tuple=1E-5 , a__ : List[str]=0.0 , a__ : List[Any]=1E-1_0 , **a__ : int , ) -> List[str]:
'''simple docstring'''
super().__init__(**a__ )
_A = hidden_size
_A = intermediate_size
_A = projection_dim
_A = num_hidden_layers
_A = num_attention_heads
_A = patch_size
_A = image_size
_A = initializer_range
_A = attention_dropout
_A = layer_norm_eps
_A = hidden_act
@classmethod
def a_ ( cls : Any , a__ : Union[str, os.PathLike] , **a__ : int ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(a__ )
_A , _A = cls.get_config_dict(a__ , **a__ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
_A = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(a__ , **a__ )
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 'blip'
__UpperCamelCase = True
def __init__( self : List[Any] , a__ : Optional[int]=None , a__ : str=None , a__ : List[str]=5_12 , a__ : Any=2.6_5_9_2 , a__ : str=2_56 , **a__ : Optional[int] , ) -> Dict:
'''simple docstring'''
super().__init__(**a__ )
if text_config is None:
_A = {}
logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values." )
if vision_config is None:
_A = {}
logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values." )
_A = BlipTextConfig(**a__ )
_A = BlipVisionConfig(**a__ )
_A = self.vision_config.hidden_size
_A = projection_dim
_A = logit_scale_init_value
_A = 1.0
_A = 0.0_2
_A = image_text_hidden_size
@classmethod
def a_ ( cls : Tuple , a__ : BlipTextConfig , a__ : BlipVisionConfig , **a__ : Optional[int] ) -> str:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **a__ )
def a_ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
_A = copy.deepcopy(self.__dict__ )
_A = self.text_config.to_dict()
_A = self.vision_config.to_dict()
_A = self.__class__.model_type
return output | 621 | 0 |
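The BlipConfig above follows a composite-config pattern: the parent builds its sub-configs from plain dicts and re-expands them in to_dict(). A dependency-free sketch of the same idea (class and field names are illustrative, not the transformers classes themselves):

import copy

class TextConfig:
    def __init__(self, vocab_size=30524):
        self.vocab_size = vocab_size

    def to_dict(self):
        return copy.deepcopy(self.__dict__)

class ComposedConfig:
    def __init__(self, text_config=None):
        # default the sub-config when the caller passes nothing, as BlipConfig does
        self.text_config = TextConfig(**(text_config or {}))

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        return output

assert ComposedConfig().to_dict() == {"text_config": {"vocab_size": 30524}}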
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class snake_case ( _UpperCamelCase):
def __init__( self : Optional[Any] , a__ : str = "▁" , a__ : bool = True , a__ : Union[str, AddedToken] = "<unk>" , a__ : Union[str, AddedToken] = "</s>" , a__ : Union[str, AddedToken] = "<pad>" , ) -> int:
'''simple docstring'''
_A = {
"pad": {"id": 0, "token": pad_token},
"eos": {"id": 1, "token": eos_token},
"unk": {"id": 2, "token": unk_token},
}
_A = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
_A = token_dict["token"]
_A = Tokenizer(Unigram() )
_A = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(" {2,}" ) , " " ),
normalizers.Lowercase(),
] )
_A = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=a__ , add_prefix_space=a__ ),
pre_tokenizers.Digits(individual_digits=a__ ),
pre_tokenizers.Punctuation(),
] )
_A = decoders.Metaspace(replacement=a__ , add_prefix_space=a__ )
_A = TemplateProcessing(
single=F"""$A {self.special_tokens['eos']['token']}""" , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , )
_A = {
"model": "SentencePieceUnigram",
"replacement": replacement,
"add_prefix_space": add_prefix_space,
}
super().__init__(a__ , a__ )
def a_ ( self : Optional[Any] , a__ : Union[str, List[str]] , a__ : int = 80_00 , a__ : bool = True , ) -> List[Any]:
'''simple docstring'''
_A = trainers.UnigramTrainer(
vocab_size=a__ , special_tokens=self.special_tokens_list , show_progress=a__ , )
if isinstance(a__ , a__ ):
_A = [files]
self._tokenizer.train(a__ , trainer=a__ )
self.add_unk_id()
def a_ ( self : str , a__ : Union[Iterator[str], Iterator[Iterator[str]]] , a__ : int = 80_00 , a__ : bool = True , ) -> Union[str, Any]:
'''simple docstring'''
_A = trainers.UnigramTrainer(
vocab_size=a__ , special_tokens=self.special_tokens_list , show_progress=a__ , )
self._tokenizer.train_from_iterator(a__ , trainer=a__ )
self.add_unk_id()
def a_ ( self : Any ) -> Optional[int]:
'''simple docstring'''
_A = json.loads(self._tokenizer.to_str() )
_A = self.special_tokens["unk"]["id"]
_A = Tokenizer.from_str(json.dumps(a__ ) )
| 710 |
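A minimal, runnable equivalent of what the wrapper above assembles, using the tokenizers library directly (the tiny corpus and vocab size are illustrative, not values from this row):

from tokenizers import Tokenizer, trainers
from tokenizers.models import Unigram

tok = Tokenizer(Unigram())
trainer = trainers.UnigramTrainer(
    vocab_size=100,
    special_tokens=["<pad>", "</s>", "<unk>"],
    unk_token="<unk>",
)
tok.train_from_iterator(["hello world", "hello there"], trainer=trainer)
print(tok.encode("hello world").tokens)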
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class snake_case ( unittest.TestCase , _UpperCamelCase):
def a_ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
_A = load_tool("text-classification" )
self.tool.setup()
_A = load_tool("text-classification" , remote=a__ )
def a_ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
_A = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def a_ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
_A = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def a_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
_A = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(a__ , "positive" )
def a_ ( self : Dict ) -> Any:
'''simple docstring'''
_A = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(a__ , "positive" ) | 621 | 0 |
"""simple docstring"""
class snake_case :
def __init__( self : Optional[int] , a__ : int , a__ : str=None , a__ : Dict=None ) -> int:
'''simple docstring'''
_A = data
_A = previous
_A = next_node
def __str__( self : Any ) -> str:
'''simple docstring'''
return F"""{self.data}"""
def a_ ( self : Optional[Any] ) -> int:
'''simple docstring'''
return self.data
def a_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
return self.next
def a_ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
return self.previous
class snake_case :
def __init__( self : Tuple , a__ : Any ) -> int:
'''simple docstring'''
_A = head
def __iter__( self : Optional[int] ) -> List[str]:
'''simple docstring'''
return self
def a_ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
if not self.current:
raise StopIteration
else:
_A = self.current.get_data()
_A = self.current.get_next()
return value
class snake_case :
def __init__( self : List[str] ) -> Optional[int]:
'''simple docstring'''
_A = None # First node in list
_A = None # Last node in list
def __str__( self : Tuple ) -> Optional[int]:
'''simple docstring'''
_A = self.head
_A = []
while current is not None:
nodes.append(current.get_data() )
_A = current.get_next()
return " ".join(str(a__ ) for node in nodes )
def __contains__( self : int , a__ : int ) -> Any:
'''simple docstring'''
_A = self.head
while current:
if current.get_data() == value:
return True
_A = current.get_next()
return False
def __iter__( self : str ) -> Tuple:
'''simple docstring'''
return LinkedListIterator(self.head )
def a_ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
if self.head:
return self.head.get_data()
return None
def a_ ( self : Optional[Any] ) -> int:
'''simple docstring'''
if self.tail:
return self.tail.get_data()
return None
def a_ ( self : Any , a__ : Node ) -> None:
'''simple docstring'''
if self.head is None:
_A = node
_A = node
else:
self.insert_before_node(self.head , a__ )
def a_ ( self : Tuple , a__ : Node ) -> None:
'''simple docstring'''
if self.head is None:
self.set_head(a__ )
else:
self.insert_after_node(self.tail , a__ )
def a_ ( self : Union[str, Any] , a__ : int ) -> None:
'''simple docstring'''
_A = Node(a__ )
if self.head is None:
self.set_head(a__ )
else:
self.set_tail(a__ )
def a_ ( self : Dict , a__ : Node , a__ : Node ) -> None:
'''simple docstring'''
_A = node
_A = node.previous
if node.get_previous() is None:
_A = node_to_insert
else:
_A = node_to_insert
_A = node_to_insert
def a_ ( self : Optional[Any] , a__ : Node , a__ : Node ) -> None:
'''simple docstring'''
_A = node
_A = node.next
if node.get_next() is None:
_A = node_to_insert
else:
_A = node_to_insert
_A = node_to_insert
def a_ ( self : int , a__ : int , a__ : int ) -> None:
'''simple docstring'''
_A = 1
_A = Node(a__ )
_A = self.head
while node:
if current_position == position:
self.insert_before_node(a__ , a__ )
return
current_position += 1
_A = node.next
self.insert_after_node(self.tail , a__ )
def a_ ( self : str , a__ : int ) -> Node:
'''simple docstring'''
_A = self.head
while node:
if node.get_data() == item:
return node
_A = node.get_next()
raise Exception("Node not found" )
def a_ ( self : Optional[Any] , a__ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
if (node := self.get_node(a__ )) is not None:
if node == self.head:
_A = self.head.get_next()
if node == self.tail:
_A = self.tail.get_previous()
self.remove_node_pointers(a__ )
@staticmethod
def a_ ( a__ : Node ) -> None:
'''simple docstring'''
if node.get_next():
_A = node.previous
if node.get_previous():
_A = node.next
_A = None
_A = None
def a_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
return self.head is None
def a__ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod() | 711 |
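The row above obfuscates every public method of the doubly linked list to a_. Assuming the original names (e.g. a class named LinkedList with insert(value) and delete_value(value) wrapping the set_head/set_tail/get_node helpers that are still visible), usage would look roughly like this sketch:

# ll = LinkedList()
# for v in (1, 2, 3):
#     ll.insert(v)        # appends via set_tail, building 1 <-> 2 <-> 3
# print(ll)               # "1 2 3"
# ll.delete_value(2)      # unlinks the node located by get_node
# print(2 in ll)          # False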
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase):
__UpperCamelCase = StableDiffusionInpaintPipeline
__UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
__UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__UpperCamelCase = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__UpperCamelCase = frozenset([])
def a_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
_A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a__ , )
_A = PNDMScheduler(skip_prk_steps=a__ )
torch.manual_seed(0 )
_A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
_A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , )
_A = CLIPTextModel(a__ )
_A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_A = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def a_ ( self : Optional[Any] , a__ : List[str] , a__ : Tuple=0 ) -> int:
'''simple docstring'''
_A = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
_A = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A = Image.fromarray(np.uinta(a__ ) ).convert("RGB" ).resize((64, 64) )
_A = Image.fromarray(np.uinta(image + 4 ) ).convert("RGB" ).resize((64, 64) )
if str(a__ ).startswith("mps" ):
_A = torch.manual_seed(a__ )
else:
_A = torch.Generator(device=a__ ).manual_seed(a__ )
_A = {
"prompt": "A painting of a squirrel eating a burger",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def a_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_A = "cpu" # ensure determinism for the device-dependent torch.Generator
_A = self.get_dummy_components()
_A = StableDiffusionInpaintPipeline(**a__ )
_A = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_A = self.get_dummy_inputs(a__ )
_A = sd_pipe(**a__ ).images
_A = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A = np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def a_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase):
def a_ ( self : List[Any] ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
_A = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench.npy" )
_A = "stabilityai/stable-diffusion-2-inpainting"
_A = StableDiffusionInpaintPipeline.from_pretrained(a__ , safety_checker=a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
_A = "Face of a yellow cat, high resolution, sitting on a park bench"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type="np" , )
_A = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def a_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
_A = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench_fp16.npy" )
_A = "stabilityai/stable-diffusion-2-inpainting"
_A = StableDiffusionInpaintPipeline.from_pretrained(
a__ , torch_dtype=torch.floataa , safety_checker=a__ , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
_A = "Face of a yellow cat, high resolution, sitting on a park bench"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type="np" , )
_A = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def a_ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
_A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
_A = "stabilityai/stable-diffusion-2-inpainting"
_A = PNDMScheduler.from_pretrained(a__ , subfolder="scheduler" )
_A = StableDiffusionInpaintPipeline.from_pretrained(
a__ , safety_checker=a__ , scheduler=a__ , torch_dtype=torch.floataa , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_A = "Face of a yellow cat, high resolution, sitting on a park bench"
_A = torch.manual_seed(0 )
_A = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , num_inference_steps=2 , output_type="np" , )
_A = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.6_5 * 10**9 | 621 | 0 |
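Stripped of test scaffolding, the call pattern these tests exercise is roughly the following (kept as comments because it downloads weights and needs a GPU; the model id is the one used above, init_image and mask_image stand for the PIL inputs):

# import torch
# from diffusers import StableDiffusionInpaintPipeline
#
# pipe = StableDiffusionInpaintPipeline.from_pretrained(
#     "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
# ).to("cuda")
# image = pipe(
#     prompt="Face of a yellow cat, high resolution, sitting on a park bench",
#     image=init_image,            # picture to repaint
#     mask_image=mask_image,       # white pixels mark the region to fill
#     generator=torch.manual_seed(0),
# ).images[0]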
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class snake_case ( _UpperCamelCase):
__UpperCamelCase = ['image_processor', 'tokenizer']
__UpperCamelCase = 'BlipImageProcessor'
__UpperCamelCase = 'AutoTokenizer'
def __init__( self : int , a__ : int , a__ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
_A = False
super().__init__(a__ , a__ )
_A = self.image_processor
def __call__( self : List[str] , a__ : ImageInput = None , a__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a__ : bool = True , a__ : Union[bool, str, PaddingStrategy] = False , a__ : Union[bool, str, TruncationStrategy] = None , a__ : Optional[int] = None , a__ : int = 0 , a__ : Optional[int] = None , a__ : Optional[bool] = None , a__ : bool = False , a__ : bool = False , a__ : bool = False , a__ : bool = False , a__ : bool = False , a__ : bool = True , a__ : Optional[Union[str, TensorType]] = None , **a__ : Union[str, Any] , ) -> BatchEncoding:
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
_A = self.tokenizer
_A = self.tokenizer(
text=a__ , add_special_tokens=a__ , padding=a__ , truncation=a__ , max_length=a__ , stride=a__ , pad_to_multiple_of=a__ , return_attention_mask=a__ , return_overflowing_tokens=a__ , return_special_tokens_mask=a__ , return_offsets_mapping=a__ , return_token_type_ids=a__ , return_length=a__ , verbose=a__ , return_tensors=a__ , **a__ , )
return text_encoding
# add pixel_values
_A = self.image_processor(a__ , return_tensors=a__ )
if text is not None:
_A = self.tokenizer(
text=a__ , add_special_tokens=a__ , padding=a__ , truncation=a__ , max_length=a__ , stride=a__ , pad_to_multiple_of=a__ , return_attention_mask=a__ , return_overflowing_tokens=a__ , return_special_tokens_mask=a__ , return_offsets_mapping=a__ , return_token_type_ids=a__ , return_length=a__ , verbose=a__ , return_tensors=a__ , **a__ , )
else:
_A = None
if text_encoding is not None:
encoding_image_processor.update(a__ )
return encoding_image_processor
def a_ ( self : Tuple , *a__ : int , **a__ : Tuple ) -> Any:
'''simple docstring'''
return self.tokenizer.batch_decode(*a__ , **a__ )
def a_ ( self : Any , *a__ : List[str] , **a__ : Optional[Any] ) -> Any:
'''simple docstring'''
return self.tokenizer.decode(*a__ , **a__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def a_ ( self : Tuple ) -> Any:
'''simple docstring'''
_A = self.tokenizer.model_input_names
_A = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) | 712 |
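The processor above pairs a BlipImageProcessor with an AutoTokenizer, which matches the structure of Blip2Processor; assuming that identification (the class name is not shown in this row), usage would read:

# from transformers import Blip2Processor
#
# processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
# inputs = processor(images=pil_image, text="a photo of", return_tensors="pt")
# # inputs then holds pixel_values plus the tokenized text fields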
"""simple docstring"""
def gcd(a , b ) -> int:
while a != 0:
a , b = b % a, a
return b
def find_mod_inverse(a , m ) -> int:
if gcd(a , m ) != 1:
msg = f"""mod inverse of {a!r} and {m!r} does not exist"""
raise ValueError(msg )
u1 , u2 , u3 = 1, 0, a
v1 , v2 , v3 = 0, 1, m
while v3 != 0:
q = u3 // v3
v1 , v2 , v3 , u1 , u2 , u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
return u1 % m | 621 | 0 |
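A numeric check of the extended-Euclid inverse above (values chosen for easy verification):

assert find_mod_inverse(3, 11) == 4    # 3 * 4 = 12 ≡ 1 (mod 11)
assert find_mod_inverse(7, 26) == 15   # 7 * 15 = 105 = 4 * 26 + 1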
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ : Optional[Any] = {
"configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
"tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[str] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Dict = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Tuple = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
a_ : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 713 |
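The _LazyModule indirection above defers heavy imports until an attribute is first touched. A tiny stand-in using a PEP 562 module-level __getattr__ (illustrative only; transformers' implementation does considerably more):

import importlib

_import_structure = {"json": ["dumps"]}  # maps submodule -> exported names

def __getattr__(name):  # invoked only for attributes not found normally
    for module_name, names in _import_structure.items():
        if name in names:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)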
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class snake_case ( _UpperCamelCase):
def __init__( self : List[Any] , a__ : Any ) -> Any:
'''simple docstring'''
_A = data
def __iter__( self : List[str] ) -> str:
'''simple docstring'''
for element in self.data:
yield element
def a__ ( __lowercase=True ) -> Tuple:
_A = Accelerator(even_batches=__lowercase )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase = False ) -> Union[str, Any]:
if iterable:
_A = DummyIterableDataset(torch.as_tensor(range(__lowercase ) ) )
else:
_A = TensorDataset(torch.as_tensor(range(__lowercase ) ) )
_A = DataLoader(__lowercase , batch_size=__lowercase )
_A = accelerator.prepare(__lowercase )
return dl
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Dict:
_A = create_dataloader(accelerator=__lowercase , dataset_size=__lowercase , batch_size=__lowercase )
_A = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def a__ ( ) -> List[str]:
_A = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
__lowercase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
__lowercase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def a__ ( ) -> List[Any]:
_A = create_accelerator(even_batches=__lowercase )
verify_dataloader_batch_sizes(
__lowercase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
__lowercase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def a__ ( ) -> int:
_A = create_accelerator(even_batches=__lowercase )
_A = torch.nn.Linear(1 , 1 )
_A = accelerator.prepare(__lowercase )
_A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 )
_A = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(__lowercase ):
_A = ddp_model(batch[0].float() )
_A = output.sum()
loss.backward()
batch_idxs.append(__lowercase )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def a__ ( __lowercase ) -> List[str]:
with warnings.catch_warnings(record=__lowercase ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , __lowercase )
assert "only supported for multi-GPU" in str(w[-1].message )
def a__ ( ) -> Tuple:
_A = True
_A = False
_A = create_accelerator(even_batches=__lowercase )
_A = torch.nn.Linear(1 , 1 )
_A = accelerator.prepare(__lowercase )
_A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 )
_A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ):
_A = train_dl.batch_sampler.even_batches
_A = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def a__ ( ) -> int:
_A = True
_A = False
_A = create_accelerator(even_batches=__lowercase )
_A = torch.nn.Linear(1 , 1 )
_A = accelerator.prepare(__lowercase )
create_dataloader(__lowercase , dataset_size=3 , batch_size=1 , iterable=__lowercase )
_A = create_dataloader(__lowercase , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings("ignore" )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ):
_A = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def a__ ( ) -> Optional[Any]:
_A = create_accelerator()
_A = torch.nn.Linear(1 , 1 )
_A = accelerator.prepare(__lowercase )
create_dataloader(__lowercase , dataset_size=3 , batch_size=1 , iterable=__lowercase )
with warnings.catch_warnings(record=__lowercase ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowercase ):
pass
assert issubclass(w[-1].category , __lowercase )
assert "only supported for map-style datasets" in str(w[-1].message )
def a__ ( ) -> Optional[Any]:
_A = create_accelerator()
accelerator.print("Test that even_batches variable ensures uniform batches across processes" )
test_default_ensures_even_batch_sizes()
accelerator.print("Run tests with even_batches disabled" )
test_can_disable_even_batches()
accelerator.print("Test joining uneven inputs" )
test_can_join_uneven_inputs()
accelerator.print("Test overriding even_batches when joining uneven inputs" )
test_join_can_override_even_batches()
accelerator.print("Test overriding even_batches for mixed dataloader types" )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print("Test join with non DDP distributed raises warning" )
_A = accelerator.state.distributed_type
_A = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(__lowercase )
_A = original_state
if __name__ == "__main__":
main() | 621 | 0 |
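A note on the expected sizes asserted above: 7 samples at batch_size=2 shard across two processes as batches [0,1],[4,5] on rank 0 and [2,3],[6] on rank 1. With even_batches=True the short trailing batch is padded by wrapping around the dataset, so both ranks observe sizes [2, 2]; with even_batches=False rank 1 keeps the short batch and observes [2, 1].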
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class snake_case ( unittest.TestCase):
def a_ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
_A = torch.nn.Linear(10 , 10 )
_A = torch.optim.SGD(model.parameters() , 0.1 )
_A = Accelerator()
_A = accelerator.prepare(a__ )
try:
pickle.loads(pickle.dumps(a__ ) )
except Exception as e:
self.fail(F"""Accelerated optimizer pickling failed with {e}""" )
AcceleratorState._reset_state() | 714 |
"""simple docstring"""
class snake_case :
def __init__( self : Optional[int] , a__ : List[Any] , a__ : List[str] , a__ : Tuple ) -> Optional[Any]:
'''simple docstring'''
_A = None
_A = None
_A = graph
self._normalize_graph(a__ , a__ )
_A = len(a__ )
_A = None
def a_ ( self : str , a__ : List[str] , a__ : List[Any] ) -> Dict:
'''simple docstring'''
if isinstance(sources , int ):
sources = [sources]
if isinstance(sinks , int ):
sinks = [sinks]
if len(a__ ) == 0 or len(a__ ) == 0:
return
_A = sources[0]
_A = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(a__ ) > 1 or len(a__ ) > 1:
_A = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
_A = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_A = max_input_flow
_A = 0
_A = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_A = max_input_flow
_A = size - 1
def a_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
if self.maximum_flow_algorithm is None:
raise Exception("You need to set maximum flow algorithm before." )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def a_ ( self : List[Any] , a__ : Optional[Any] ) -> str:
'''simple docstring'''
_A = algorithm(self )
class snake_case :
def __init__( self : List[str] , a__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
_A = flow_network
_A = flow_network.verticesCount
_A = flow_network.sourceIndex
_A = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
_A = flow_network.graph
_A = False
def a_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
if not self.executed:
self._algorithm()
_A = True
def a_ ( self : Any ) -> int:
'''simple docstring'''
pass
class snake_case ( _UpperCamelCase):
def __init__( self : Optional[Any] , a__ : Dict ) -> List[str]:
'''simple docstring'''
super().__init__(a__ )
# use this to save your result
_A = -1
def a_ ( self : Any ) -> List[str]:
'''simple docstring'''
if not self.executed:
raise Exception("You should execute algorithm before using its result!" )
return self.maximum_flow
class snake_case ( _UpperCamelCase):
def __init__( self : Union[str, Any] , a__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
super().__init__(a__ )
_A = [[0] * self.verticies_count for i in range(self.verticies_count )]
_A = [0] * self.verticies_count
_A = [0] * self.verticies_count
def a_ ( self : Any ) -> Dict:
'''simple docstring'''
_A = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_A = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
_A = 0
while i < len(a__ ):
_A = vertices_list[i]
_A = self.heights[vertex_index]
self.process_vertex(a__ )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(a__ ) )
_A = 0
else:
i += 1
_A = sum(self.preflow[self.source_index] )
def a_ ( self : Dict , a__ : Any ) -> Optional[int]:
'''simple docstring'''
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(a__ , a__ )
self.relabel(a__ )
def a_ ( self : str , a__ : Optional[int] , a__ : List[Any] ) -> Optional[int]:
'''simple docstring'''
_A = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def a_ ( self : Any , a__ : Dict ) -> Any:
'''simple docstring'''
_A = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_A = self.heights[to_index]
if min_height is not None:
_A = min_height + 1
if __name__ == "__main__":
a_ = [0]
a_ = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
a_ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
a_ = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
a_ = flow_network.find_maximum_flow()
print(f'''maximum flow is {maximum_flow}''') | 621 | 0 |
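On the sample network above, the only source-to-sink chain is 0 -> 1 -> 2 -> 3 with capacities 7, 6 and 8 (the 9-capacity edge re-enters the source, so it cannot carry net flow), so the printed maximum flow should be the bottleneck:

assert min(7, 6, 8) == 6  # expected value of maximum_flow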
"""simple docstring"""
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
a_ = TypeVar("T")
class snake_case ( Generic[T]):
def __init__( self : Dict , a__ : bool = True ) -> None:
'''simple docstring'''
_A = {} # dictionary of lists
_A = directed
def a_ ( self : int , a__ : T , a__ : T ) -> GraphAdjacencyList[T]:
'''simple docstring'''
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(a__ )
self.adj_list[destination_vertex].append(a__ )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(a__ )
_A = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(a__ )
_A = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
_A = [destination_vertex]
_A = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(a__ )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(a__ )
_A = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
_A = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
_A = [destination_vertex]
_A = []
return self
def __repr__( self : int ) -> str:
'''simple docstring'''
return pformat(self.adj_list ) | 715 |
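The edge-adding method above is obfuscated to a_, but its return annotation shows the class name and it returns self, so calls chain; restoring the likely name add_edge, usage would read:

# g = GraphAdjacencyList(directed=False)
# g.add_edge(1, 2).add_edge(2, 3)
# print(g)   # {1: [2], 2: [1, 3], 3: [2]}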
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 621 | 0 |
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
a_ = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
a_ = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
a_ = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class snake_case ( datasets.Metric):
def a_ ( self : int ) -> str:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[
"https://arxiv.org/abs/2102.01454",
"https://github.com/krishnap25/mauve",
] , )
def a_ ( self : List[str] , a__ : Union[str, Any] , a__ : List[Any] , a__ : int=None , a__ : Optional[int]=None , a__ : Tuple=None , a__ : Any=None , a__ : Optional[int]="auto" , a__ : Tuple=-1 , a__ : Optional[int]=0.9 , a__ : Optional[int]=5 , a__ : Union[str, Any]=5_00 , a__ : Optional[Any]="gpt2-large" , a__ : Optional[int]=-1 , a__ : int=10_24 , a__ : Union[str, Any]=25 , a__ : Dict=5 , a__ : Optional[int]=True , a__ : int=25 , ) -> int:
'''simple docstring'''
_A = compute_mauve(
p_text=a__ , q_text=a__ , p_features=a__ , q_features=a__ , p_tokens=a__ , q_tokens=a__ , num_buckets=a__ , pca_max_data=a__ , kmeans_explained_var=a__ , kmeans_num_redo=a__ , kmeans_max_iter=a__ , featurize_model_name=a__ , device_id=a__ , max_text_length=a__ , divergence_curve_discretization_size=a__ , mauve_scaling_factor=a__ , verbose=a__ , seed=a__ , )
return out | 716 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
a_ = logging.get_logger(__name__)
class snake_case ( _UpperCamelCase):
def __init__( self : str , *a__ : Dict , **a__ : Optional[int] ) -> None:
'''simple docstring'''
warnings.warn(
"The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use GLPNImageProcessor instead." , a__ , )
super().__init__(*a__ , **a__ ) | 621 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a_ = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 717 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def a__ ( __lowercase ) -> Optional[int]:
_A = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"_float_tensor",
"decoder.output_projection.weight",
]
for k in ignore_keys:
state_dict.pop(__lowercase , __lowercase )
def a__ ( __lowercase ) -> List[Any]:
_A , _A = emb.weight.shape
_A = nn.Linear(__lowercase , __lowercase , bias=__lowercase )
_A = emb.weight.data
return lin_layer
def a__ ( __lowercase , __lowercase="facebook/mbart-large-en-ro" , __lowercase=False , __lowercase=False ) -> List[str]:
_A = torch.load(__lowercase , map_location="cpu" )["model"]
remove_ignore_keys_(__lowercase )
_A = state_dict["encoder.embed_tokens.weight"].shape[0]
_A = MBartConfig.from_pretrained(__lowercase , vocab_size=__lowercase )
if mbart_aa and finetuned:
_A = "relu"
_A = state_dict["decoder.embed_tokens.weight"]
_A = MBartForConditionalGeneration(__lowercase )
model.model.load_state_dict(__lowercase )
if finetuned:
_A = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config",
default="facebook/mbart-large-cc25",
type=str,
help="Which huggingface architecture to use: mbart-large",
)
parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
a_ = parser.parse_args()
a_ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path) | 621 | 0 |
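make_linear_from_emb above implements weight tying: it turns the shared embedding matrix into a bias-free LM head. The same trick in isolation (toy sizes; requires torch):

import torch
from torch import nn

emb = nn.Embedding(10, 4)                        # 10-word vocab, 4-dim embeddings
vocab_size, emb_size = emb.weight.shape
lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
lin_layer.weight.data = emb.weight.data          # share storage with the embedding
assert torch.equal(lin_layer.weight, emb.weight)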
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
a_ = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 718 |
"""simple docstring"""
import numpy as np
def a__ ( vector: np.ndarray , alpha: float ) -> np.ndarray:
return np.where(vector > 0 , vector , (alpha * (np.exp(vector ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 621 | 0 |
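The function above is the exponential linear unit, f(x) = x for x > 0 and alpha * (exp(x) - 1) otherwise. A quick numeric check under a readable name:

import numpy as np

def elu(vector: np.ndarray, alpha: float) -> np.ndarray:
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))

print(elu(np.array([-1.0, 0.0, 2.0]), alpha=1.0))
# [-0.63212056  0.          2.        ]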