import inspect
import unittest

from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
    from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
from jiwer import compute_measures

import datasets


_CITATION = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"

_DESCRIPTION = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"

_KWARGS_DESCRIPTION = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]

            return incorrect / total
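
# A minimal sketch of the WER formula from the description above, computed
# with a plain word-level edit distance instead of jiwer. The helper name
# `word_error_rate` is illustrative and not part of the metric's API.
def word_error_rate(reference: str, prediction: str) -> float:
    ref, hyp = reference.split(), prediction.split()
    # dp[i][j] = word-level edit distance between ref[:i] and hyp[:j]
    dp = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        dp[i][0] = i
    for j in range(len(hyp) + 1):
        dp[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            substitution = dp[i - 1][j - 1] + (ref[i - 1] != hyp[j - 1])
            deletion = dp[i - 1][j] + 1
            insertion = dp[i][j - 1] + 1
            dp[i][j] = min(substitution, deletion, insertion)
    # WER = (S + D + I) / N, with N the number of reference words
    return dp[len(ref)][len(hyp)] / len(ref)


# One substitution over a four-word reference gives 1 / 4 = 0.25.
assert word_error_rate("this is the reference", "this is the prediction") == 0.25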
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Find the set of distinct prime factors of an integer."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoize the number of unique prime factors for a given value."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check that all elements of an iterable are equal (empty counts too)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first group of `n` consecutive integers that each have
    exactly `n` distinct prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int | None:
    """Return the first member of the first group of `n` consecutive
    integers with `n` distinct prime factors each (Project Euler 47)."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
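
# A quick sanity check for the solver above (an illustrative addition, not
# from the original file), using the example in the Project Euler 47
# statement: the first three consecutive integers with three distinct prime
# factors each are 644 = 2^2 x 7 x 23, 645 = 3 x 5 x 43 and 646 = 2 x 17 x 19.
assert run(3) == [644, 645, 646]
assert upf_len(644) == upf_len(645) == upf_len(646) == 3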
from typing import List, Optional, Tuple, Union

import torch

from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
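
if __name__ == "__main__":
    # Usage sketch (an illustrative addition, not part of the original file):
    # any DDPM checkpoint works here because __init__ converts the scheduler
    # config to DDIM; "google/ddpm-cifar10-32" is one public example.
    from diffusers import DDIMPipeline

    pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
    # eta=0.0 gives deterministic DDIM sampling for a fixed generator
    image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
    image.save("ddim_sample.png")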
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple

import yaml


class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node) -> None:
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])

    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Load the dataset metadata from the YAML block of a dataset card (README.md)."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Load the dataset metadata from a YAML string; raises TypeError on duplicate keys."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}

        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")


known_task_ids = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}

if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
"""Utilities for file names."""
# Lint as: python3
import itertools
import os
import re


_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
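
# Worked examples for the helpers above (outputs derived from the regexes on
# a POSIX path; these asserts are illustrative additions, not part of the
# original module):
assert camelcase_to_snakecase("SomeDataset") == "some_dataset"
assert snakecase_to_camelcase("some_dataset") == "SomeDataset"
assert filename_prefix_for_split("SomeDataset", "train") == "some_dataset-train"
assert filenames_for_dataset_split(
    "/data", "SomeDataset", "train", filetype_suffix="arrow", shard_lengths=[100, 50]
) == [
    "/data/some_dataset-train-00000-of-00002.arrow",
    "/data/some_dataset-train-00001-of-00002.arrow",
]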
"""simple docstring"""
import argparse
_A = "docs/source/_static/js/custom.js"
def lowercase_ ( __UpperCAmelCase ) -> int:
with open(__UpperCAmelCase , encoding="""utf-8""" , newline="""\n""" ) as f:
lowerCAmelCase__ : Optional[Any] = f.readlines()
lowerCAmelCase__ : Any = 0
# First let's put the right version
while not lines[index].startswith("""const stableVersion =""" ):
index += 1
lowerCAmelCase__ : Any = f"""const stableVersion = \"v{version}\"\n"""
# Then update the dictionary
while not lines[index].startswith("""const versionMapping = {""" ):
index += 1
# We go until the end
while not lines[index].startswith("""}""" ):
index += 1
# We add the new version at the end
lines[index - 1] += f""" \"v{version}\": \"v{version}\",\n"""
with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(__UpperCAmelCase )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument("""--version""", help="""Release version.""")
_A = parser.parse_args()
update_custom_js(args.version)
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder


# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
"""simple docstring"""
import math
def A__ ( UpperCamelCase , UpperCamelCase ):
if initial_intensity < 0:
raise ValueError("The value of intensity cannot be negative" )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError("In Malus Law, the angle is in the range 0-360 degrees" )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(UpperCamelCase ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
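
# Illustrative checks (an addition, not from the original file): at 0 degrees
# all light passes, and at 45 degrees exactly half, since cos^2(45 deg) = 0.5.
assert malus_law(100, 0) == 100.0
assert round(malus_law(100, 45), 10) == 50.0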
import argparse
import copy


def generate_neighbours(path):
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []

    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0

            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
    parser.add_argument(
        "-f",
        "--File",
        type=str,
        help="Path to the file containing the data",
        required=True,
    )
    parser.add_argument(
        "-i",
        "--Iterations",
        type=int,
        help="How many iterations the algorithm should perform",
        required=True,
    )
    parser.add_argument(
        "-s", "--Size", type=int, help="Size of the tabu list", required=True
    )

    # Pass the arguments to main method
    main(parser.parse_args())
from __future__ import annotations

import inspect
import unittest

import numpy as np

from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFResNetForImageClassification, TFResNetModel
    from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])

        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-char bit string to little-endian byte order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Format a non-negative integer as 8 little-endian hex characters."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Convert the message to a bit string padded to a multiple of 512 chars,
    with the original length appended at the end."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the bit string into 512-char blocks of 16 32-bit words each."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Perform bitwise NOT on a 32-bit integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Add two numbers modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate the bits of a 32-bit integer left by `shift` positions."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the 32-char MD5 hex digest of `message` as bytes."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)     # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)     # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
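
# A quick cross-check of the implementation above against the standard
# library; this snippet is an illustrative addition (using the helper names
# from the cleaned-up file), not part of the original module.
import hashlib

message = b"The quick brown fox jumps over the lazy dog"
assert md5_me(message) == hashlib.md5(message).hexdigest().encode("utf-8")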
from __future__ import annotations

DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
__version__ = "0.21.0"

from .accelerator import Accelerator
from .big_modeling import (
    cpu_offload,
    cpu_offload_with_hook,
    disk_offload,
    dispatch_model,
    init_empty_weights,
    init_on_device,
    load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
    DeepSpeedPlugin,
    DistributedDataParallelKwargs,
    DistributedType,
    FullyShardedDataParallelPlugin,
    GradScalerKwargs,
    InitProcessGroupKwargs,
    find_executable_batch_size,
    infer_auto_device_map,
    is_rich_available,
    load_checkpoint_in_model,
    synchronize_rng_states,
)

if is_rich_available():
    from .utils import rich
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
lowercase_ = 0
lowercase_ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowercase_ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
lowercase_ = tuple[int, int]
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a , _a , ):
__a = pos_x
__a = pos_y
__a = (pos_y, pos_x)
__a = goal_x
__a = goal_y
__a = g_cost
__a = parent
__a = self.calculate_heuristic()
__a = self.g_cost + self.h_cost
def __UpperCAmelCase ( self ):
__a = self.pos_x - self.goal_x
__a = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(_a ) + abs(_a )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self , _a ):
return self.f_cost < other.f_cost
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a ):
__a = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _a )
__a = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , _a )
__a = [self.start]
__a = []
__a = False
def __UpperCAmelCase ( self ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__a = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(_a )
self.closed_nodes.append(_a )
__a = self.get_successors(_a )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_a )
else:
# retrieve the best current path
__a = self.open_nodes.pop(self.open_nodes.index(_a ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_a )
else:
self.open_nodes.append(_a )
return [self.start.pos]
def __UpperCAmelCase ( self , _a ):
__a = []
for action in delta:
__a = parent.pos_x + action[1]
__a = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_a ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_a , _a , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _a , ) )
return successors
def __UpperCAmelCase ( self , _a ):
__a = node
__a = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__a = current_node.parent
path.reverse()
return path
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a ):
__a = AStar(_a , _a )
__a = AStar(_a , _a )
__a = False
def __UpperCAmelCase ( self ):
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
__a = self.fwd_astar.open_nodes.pop(0 )
__a = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
_a , _a )
self.fwd_astar.closed_nodes.append(_a )
self.bwd_astar.closed_nodes.append(_a )
__a = current_bwd_node
__a = current_fwd_node
__a = {
self.fwd_astar: self.fwd_astar.get_successors(_a ),
self.bwd_astar: self.bwd_astar.get_successors(_a ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(_a )
else:
# retrieve the best current path
__a = astar.open_nodes.pop(
astar.open_nodes.index(_a ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(_a )
else:
astar.open_nodes.append(_a )
return [self.fwd_astar.start.pos]
def __UpperCAmelCase ( self , _a , _a ):
__a = self.fwd_astar.retrace_path(_a )
__a = self.bwd_astar.retrace_path(_a )
bwd_path.pop()
bwd_path.reverse()
__a = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
lowercase_ = (0, 0)
lowercase_ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
lowercase_ = time.time()
lowercase_ = AStar(init, goal)
lowercase_ = a_star.search()
lowercase_ = time.time() - start_time
print(F'''AStar execution time = {end_time:f} seconds''')
lowercase_ = time.time()
lowercase_ = BidirectionalAStar(init, goal)
lowercase_ = bd_astar.search()
lowercase_ = time.time() - bd_start_time
print(F'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
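
# Hedged, self-contained sketch (not part of the original file): the two
# heuristics the HEURISTIC flag above switches between, with plain names.
def manhattan(dx, dy):
    return abs(dx) + abs(dy)

def euclidean(dx, dy):
    return (dx**2 + dy**2) ** 0.5

# Manhattan dominates Euclidean (|dx| + |dy| >= sqrt(dx**2 + dy**2)), so it is
# the tighter admissible estimate on a 4-connected grid.
assert manhattan(3, 4) == 7 and euclidean(3, 4) == 5.0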
| 45
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a : Dict = logging.get_logger(__name__)
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : List[str] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
UpperCAmelCase : Tuple = 192
UpperCAmelCase : str = 768
UpperCAmelCase : List[Any] = 12
UpperCAmelCase : List[Any] = 3
UpperCAmelCase : List[Any] = [800, 1333]
UpperCAmelCase : List[str] = False
elif yolos_name == "yolos_s_dWr":
UpperCAmelCase : Union[str, Any] = 330
UpperCAmelCase : Union[str, Any] = 14
UpperCAmelCase : Any = 6
UpperCAmelCase : int = 1320
elif "yolos_s" in yolos_name:
UpperCAmelCase : Union[str, Any] = 384
UpperCAmelCase : Dict = 1536
UpperCAmelCase : str = 12
UpperCAmelCase : List[str] = 6
elif "yolos_b" in yolos_name:
UpperCAmelCase : int = [800, 1344]
UpperCAmelCase : Optional[int] = 91
UpperCAmelCase : int = "huggingface/label-files"
UpperCAmelCase : Union[str, Any] = "coco-detection-id2label.json"
UpperCAmelCase : Optional[Any] = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="dataset" ) , "r" ) )
UpperCAmelCase : str = {int(__magic_name__ ): v for k, v in idalabel.items()}
UpperCAmelCase : str = idalabel
UpperCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
return config
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ = False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase : Tuple = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
UpperCAmelCase : List[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase : str = in_proj_weight[: config.hidden_size, :]
UpperCAmelCase : Optional[int] = in_proj_bias[: config.hidden_size]
UpperCAmelCase : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase : str = in_proj_weight[-config.hidden_size :, :]
UpperCAmelCase : Tuple = in_proj_bias[-config.hidden_size :]
def lowercase ( __magic_name__ ):
'''simple docstring'''
if "backbone" in name:
UpperCAmelCase : int = name.replace("backbone" , "vit" )
if "cls_token" in name:
UpperCAmelCase : Dict = name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
UpperCAmelCase : int = name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
UpperCAmelCase : Tuple = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
UpperCAmelCase : int = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
UpperCAmelCase : str = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
UpperCAmelCase : Tuple = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
UpperCAmelCase : Tuple = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
UpperCAmelCase : Any = name.replace("attn" , "attention.self" )
if "norm1" in name:
UpperCAmelCase : int = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
UpperCAmelCase : List[str] = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
UpperCAmelCase : List[str] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
UpperCAmelCase : Dict = name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
UpperCAmelCase : Any = name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
UpperCAmelCase : Optional[int] = name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
UpperCAmelCase : Tuple = name.replace("vit.norm" , "vit.layernorm" )
return name
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCAmelCase : Optional[int] = orig_state_dict.pop(__magic_name__ )
if "qkv" in key:
UpperCAmelCase : str = key.split("." )
UpperCAmelCase : List[Any] = int(key_split[2] )
UpperCAmelCase : int = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
UpperCAmelCase : Optional[int] = val[:dim, :]
UpperCAmelCase : Union[str, Any] = val[
dim : dim * 2, :
]
UpperCAmelCase : Any = val[-dim:, :]
else:
UpperCAmelCase : Tuple = val[:dim]
UpperCAmelCase : List[str] = val[dim : dim * 2]
UpperCAmelCase : Any = val[-dim:]
else:
UpperCAmelCase : Union[str, Any] = val
return orig_state_dict
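
# Hedged, self-contained sketch (not part of the original file): the slicing
# convert_state_dict performs above -- a fused qkv projection is split into
# three equal blocks along the output dimension. Shapes here are invented and
# the snippet reuses the torch import from the top of this file.
qkv = torch.arange(18.0).reshape(6, 3)  # (3 * dim, in_features) with dim = 2
q, k, v = qkv[:2, :], qkv[2:4, :], qkv[-2:, :]
assert q.shape == k.shape == v.shape == (2, 3)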
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase : Tuple = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
return im
@torch.no_grad()
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = False ):
'''simple docstring'''
UpperCAmelCase : Tuple = get_yolos_config(__magic_name__ )
# load original state_dict
UpperCAmelCase : int = torch.load(__magic_name__ , map_location="cpu" )["model"]
# load 🤗 model
UpperCAmelCase : int = YolosForObjectDetection(__magic_name__ )
model.eval()
UpperCAmelCase : Dict = convert_state_dict(__magic_name__ , __magic_name__ )
model.load_state_dict(__magic_name__ )
# Check outputs on an image, prepared by YolosImageProcessor
UpperCAmelCase : Dict = 800 if yolos_name != "yolos_ti" else 512
UpperCAmelCase : int = YolosImageProcessor(format="coco_detection" , size=__magic_name__ )
UpperCAmelCase : List[Any] = image_processor(images=prepare_img() , return_tensors="pt" )
UpperCAmelCase : List[str] = model(**__magic_name__ )
UpperCAmelCase , UpperCAmelCase : Optional[int] = outputs.logits, outputs.pred_boxes
UpperCAmelCase , UpperCAmelCase : Optional[Any] = None, None
if yolos_name == "yolos_ti":
UpperCAmelCase : str = torch.tensor(
[[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] )
UpperCAmelCase : Tuple = torch.tensor(
[[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] )
elif yolos_name == "yolos_s_200_pre":
UpperCAmelCase : Union[str, Any] = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] )
UpperCAmelCase : List[str] = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] )
elif yolos_name == "yolos_s_300_pre":
UpperCAmelCase : List[str] = torch.tensor(
[[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] )
UpperCAmelCase : Dict = torch.tensor(
[[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] )
elif yolos_name == "yolos_s_dWr":
UpperCAmelCase : Dict = torch.tensor(
[[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] )
UpperCAmelCase : List[Any] = torch.tensor(
[[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] )
elif yolos_name == "yolos_base":
UpperCAmelCase : str = torch.tensor(
[[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] )
UpperCAmelCase : Union[str, Any] = torch.tensor(
[[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] )
else:
raise ValueError(F"Unknown yolos_name: {yolos_name}" )
assert torch.allclose(logits[0, :3, :3] , __magic_name__ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , __magic_name__ , atol=1e-4 )
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__magic_name__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__magic_name__ )
if push_to_hub:
UpperCAmelCase : int = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
UpperCAmelCase : Tuple = model_mapping[yolos_name]
image_processor.push_to_hub(__magic_name__ , organization="hustvl" )
model.push_to_hub(__magic_name__ , organization="hustvl" )
if __name__ == "__main__":
a : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
a : str = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 311
| 0
|
"""simple docstring"""
import argparse
import copy
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> Tuple:
'''simple docstring'''
lowercase = {}
with open(lowerCAmelCase__ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
lowercase = []
_list.append([line.split()[1], line.split()[2]] )
lowercase = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
lowercase = []
_list.append([line.split()[0], line.split()[2]] )
lowercase = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
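
# Hedged note (not part of the original file): for an input line "a b 20", the
# adjacency dict built above maps both endpoints to each other together with
# the edge weight, e.g. {"a": [["b", "20"]], "b": [["a", "20"]]}.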
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict ) -> List[Any]:
'''simple docstring'''
with open(lowerCAmelCase__ ) as f:
lowercase = f.read(1 )
lowercase = start_node
lowercase = []
lowercase = start_node
lowercase = 0
while visiting not in first_solution:
lowercase = 1_0_0_0_0
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(lowerCAmelCase__ ) and k[0] not in first_solution:
lowercase = k[1]
lowercase = k[0]
first_solution.append(lowerCAmelCase__ )
lowercase = distance_of_first_solution + int(lowerCAmelCase__ )
lowercase = best_node
first_solution.append(lowerCAmelCase__ )
lowercase = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
lowercase = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_0_0_0_0
)
return first_solution, distance_of_first_solution
def UpperCAmelCase__ ( lowerCAmelCase__ :Dict , lowerCAmelCase__ :Dict ) -> Dict:
'''simple docstring'''
lowercase = []
for n in solution[1:-1]:
lowercase = solution.index(lowerCAmelCase__ )
for kn in solution[1:-1]:
lowercase = solution.index(lowerCAmelCase__ )
if n == kn:
continue
lowercase = copy.deepcopy(lowerCAmelCase__ )
lowercase = kn
lowercase = n
lowercase = 0
for k in _tmp[:-1]:
lowercase = _tmp[_tmp.index(lowerCAmelCase__ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
lowercase = distance + int(i[1] )
_tmp.append(lowerCAmelCase__ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
lowercase = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda x : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Optional[int] ) -> List[str]:
'''simple docstring'''
lowercase = 1
lowercase = first_solution
lowercase = []
lowercase = distance_of_first_solution
lowercase = solution
while count <= iters:
lowercase = find_neighborhood(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = 0
lowercase = neighborhood[index_of_best_solution]
lowercase = len(lowerCAmelCase__ ) - 1
lowercase = False
while not found:
lowercase = 0
while i < len(lowerCAmelCase__ ):
if best_solution[i] != solution[i]:
lowercase = best_solution[i]
lowercase = solution[i]
break
lowercase = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
lowercase = True
lowercase = best_solution[:-1]
lowercase = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
lowercase = cost
lowercase = solution
else:
lowercase = index_of_best_solution + 1
lowercase = neighborhood[index_of_best_solution]
if len(lowerCAmelCase__ ) >= size:
tabu_list.pop(0 )
lowercase = count + 1
return best_solution_ever, best_cost
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any]=None ) -> str:
'''simple docstring'''
lowercase = generate_neighbours(args.File )
lowercase = generate_first_solution(
args.File , lowerCAmelCase__ )
lowercase = tabu_search(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , args.Iterations , args.Size , )
print(f'Best solution: {best_sol}, with total distance: {best_cost}.' )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] =argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
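
# Hedged, self-contained sketch (not part of the original file): the move that
# find_neighborhood applies above -- exchanging two cities in a tour yields one
# neighbouring solution. The helper name and sample tour are invented.
def swap_positions(tour, i, j):
    neighbour = list(tour)
    neighbour[i], neighbour[j] = neighbour[j], neighbour[i]
    return neighbour

assert swap_positions(["a", "b", "c", "d"], 1, 2) == ["a", "c", "b", "d"]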
| 197
|
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
a : Tuple = logging.getLogger(__name__)
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : Any = argparse.ArgumentParser(
description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
parser.add_argument("--file_path" , type=__magic_name__ , default="data/dump.txt" , help="The path to the data." )
parser.add_argument("--tokenizer_type" , type=__magic_name__ , default="bert" , choices=["bert", "roberta", "gpt2"] )
parser.add_argument("--tokenizer_name" , type=__magic_name__ , default="bert-base-uncased" , help="The tokenizer to use." )
parser.add_argument("--dump_file" , type=__magic_name__ , default="data/dump" , help="The dump file prefix." )
UpperCAmelCase : List[Any] = parser.parse_args()
logger.info(F"Loading Tokenizer ({args.tokenizer_name})" )
if args.tokenizer_type == "bert":
UpperCAmelCase : Any = BertTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase : Optional[int] = tokenizer.special_tokens_map["cls_token"] # `[CLS]`
UpperCAmelCase : Any = tokenizer.special_tokens_map["sep_token"] # `[SEP]`
elif args.tokenizer_type == "roberta":
UpperCAmelCase : List[Any] = RobertaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase : Tuple = tokenizer.special_tokens_map["cls_token"] # `<s>`
UpperCAmelCase : Optional[int] = tokenizer.special_tokens_map["sep_token"] # `</s>`
elif args.tokenizer_type == "gpt2":
UpperCAmelCase : List[str] = GPTaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase : Optional[Any] = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>`
UpperCAmelCase : List[Any] = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>`
logger.info(F"Loading text from {args.file_path}" )
with open(args.file_path , "r" , encoding="utf8" ) as fp:
UpperCAmelCase : str = fp.readlines()
logger.info("Start encoding" )
logger.info(F"{len(__magic_name__ )} examples to process." )
UpperCAmelCase : int = []
UpperCAmelCase : int = 0
UpperCAmelCase : Union[str, Any] = 1_0000
UpperCAmelCase : Union[str, Any] = time.time()
for text in data:
UpperCAmelCase : Dict = F"{bos} {text.strip()} {sep}"
UpperCAmelCase : Tuple = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
rslt.append(__magic_name__ )
iter += 1
if iter % interval == 0:
UpperCAmelCase : Dict = time.time()
logger.info(F"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" )
UpperCAmelCase : Any = time.time()
logger.info("Finished binarization" )
logger.info(F"{len(__magic_name__ )} examples processed." )
UpperCAmelCase : str = F"{args.dump_file}.{args.tokenizer_name}.pickle"
UpperCAmelCase : List[str] = tokenizer.vocab_size
if vocab_size < (1 << 16):
UpperCAmelCase : int = [np.uintaa(__magic_name__ ) for d in rslt]
else:
UpperCAmelCase : int = [np.intaa(__magic_name__ ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F"Dump to {dp_file}" )
with open(__magic_name__ , "wb" ) as handle:
pickle.dump(rslt_ , __magic_name__ , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
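
# Hedged note (not part of the original file): token ids are stored as 16-bit
# unsigned integers whenever the vocabulary fits, halving the pickle size
# relative to 32-bit ints; 2**16 - 1 = 65535 is the largest id uint16 can hold.
assert (1 << 16) - 1 == 65535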
| 311
| 0
|
from __future__ import annotations
def __snake_case ( _UpperCAmelCase ):
if len(_UpperCAmelCase ) == 0:
return []
__a , __a = min(_UpperCAmelCase ), max(_UpperCAmelCase )
__a = int(max_value - min_value ) + 1
__a = [[] for _ in range(_UpperCAmelCase )]
for i in my_list:
buckets[int(i - min_value )].append(_UpperCAmelCase )
return [v for bucket in buckets for v in sorted(_UpperCAmelCase )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
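
# Hedged, self-contained sketch (not part of the original file): the same
# bucketing idea with plain names -- one bucket per integer offset from the
# minimum value, each bucket sorted before concatenation.
def bucket_sort_sketch(values):
    if not values:
        return []
    lo, hi = min(values), max(values)
    buckets = [[] for _ in range(int(hi - lo) + 1)]
    for v in values:
        buckets[int(v - lo)].append(v)
    return [v for bucket in buckets for v in sorted(bucket)]

assert bucket_sort_sketch([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]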
| 49
|
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
a : Tuple = ["gpt2"]
a : Dict = "gpt2"
if is_tf_available():
class UpperCamelCase__ ( tf.Module ):
"""simple docstring"""
def __init__( self , snake_case ):
'''simple docstring'''
super().__init__()
UpperCAmelCase : Tuple = tokenizer
UpperCAmelCase : List[str] = AutoConfig.from_pretrained(snake_case )
UpperCAmelCase : int = TFGPTaLMHeadModel.from_config(snake_case )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="text" ),) )
def A_ ( self , snake_case ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.tokenizer(snake_case )
UpperCAmelCase : Optional[int] = tokenized["input_ids"].to_tensor()
UpperCAmelCase : Optional[int] = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
UpperCAmelCase : List[Any] = self.model(input_ids=snake_case , attention_mask=snake_case )["logits"]
return outputs
@require_tf
@require_keras_nlp
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
super().setUp()
UpperCAmelCase : Any = [GPTaTokenizer.from_pretrained(snake_case ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
UpperCAmelCase : Optional[Any] = [TFGPTaTokenizer.from_pretrained(snake_case ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
UpperCAmelCase : Tuple = [
"This is a straightforward English test sentence.",
"This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
"Now we're going to add some Chinese: 一 二 三 一二三",
"And some much more rare Chinese: 齉 堃 齉堃",
"Je vais aussi écrire en français pour tester les accents",
"Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
]
UpperCAmelCase : Optional[Any] = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def A_ ( self ):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
UpperCAmelCase : List[Any] = tokenizer([test_inputs] , return_tensors="tf" )
UpperCAmelCase : Any = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
UpperCAmelCase : Dict = python_outputs[key].numpy()
UpperCAmelCase : List[str] = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(snake_case , tf.intaa ) == tf_outputs_values ) )
@slow
def A_ ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
UpperCAmelCase : Optional[Any] = tf.function(snake_case )
for test_inputs in self.test_sentences:
UpperCAmelCase : List[str] = tf.constant(snake_case )
UpperCAmelCase : Dict = compiled_tokenizer(snake_case )
UpperCAmelCase : Union[str, Any] = tf_tokenizer(snake_case )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def A_ ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
UpperCAmelCase : int = ModelToSave(tokenizer=snake_case )
UpperCAmelCase : Tuple = tf.convert_to_tensor([self.test_sentences[0]] )
UpperCAmelCase : str = model.serving(snake_case ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
UpperCAmelCase : Optional[int] = Path(snake_case ) / "saved.model"
tf.saved_model.save(snake_case , snake_case , signatures={"serving_default": model.serving} )
UpperCAmelCase : int = tf.saved_model.load(snake_case )
UpperCAmelCase : str = loaded_model.signatures["serving_default"](snake_case )["output_0"]
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def A_ ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
UpperCAmelCase : Any = tf.convert_to_tensor([self.test_sentences[0]] )
UpperCAmelCase : Tuple = tf_tokenizer(snake_case ) # Build model with some sample inputs
UpperCAmelCase : Union[str, Any] = tf_tokenizer.get_config()
UpperCAmelCase : str = TFGPTaTokenizer.from_config(snake_case )
UpperCAmelCase : Tuple = model_from_config(snake_case )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def A_ ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
UpperCAmelCase : List[str] = 1_2_3_1_2_3
for max_length in [3, 5, 1_0_2_4]:
UpperCAmelCase : Any = tf.convert_to_tensor([self.test_sentences[0]] )
UpperCAmelCase : Tuple = tf_tokenizer(snake_case , max_length=snake_case )
UpperCAmelCase : Union[str, Any] = out["input_ids"].numpy().shape[1]
assert out_length == max_length
| 311
| 0
|
from __future__ import annotations
from PIL import Image
# Define glider example
snake_case_ = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
snake_case_ = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def snake_case__ ( SCREAMING_SNAKE_CASE_ : List[str] ):
'''simple docstring'''
lowercase__ : Tuple = []
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
lowercase__ : Union[str, Any] = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
lowercase__ : Union[str, Any] = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(SCREAMING_SNAKE_CASE_ ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(SCREAMING_SNAKE_CASE_ ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(SCREAMING_SNAKE_CASE_ ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
lowercase__ : Optional[Any] = cells[i][j] == 1
if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(SCREAMING_SNAKE_CASE_ )
return next_generation
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
'''simple docstring'''
lowercase__ : Union[str, Any] = []
for _ in range(SCREAMING_SNAKE_CASE_ ):
# Create output image
lowercase__ : List[Any] = Image.new('RGB' , (len(cells[0] ), len(SCREAMING_SNAKE_CASE_ )) )
lowercase__ : Dict = img.load()
# Save cells to image
for x in range(len(SCREAMING_SNAKE_CASE_ ) ):
for y in range(len(cells[0] ) ):
lowercase__ : Dict = 255 - cells[y][x] * 255
lowercase__ : Tuple = (colour, colour, colour)
# Save image
images.append(SCREAMING_SNAKE_CASE_ )
lowercase__ : str = new_generation(SCREAMING_SNAKE_CASE_ )
return images
if __name__ == "__main__":
snake_case_ = generate_images(GLIDER, 16)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
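
# Hedged, self-contained sketch (not part of the original file): the neighbour
# count above, compressed into one comprehension with plain names.
def count_live_neighbours(cells, i, j):
    return sum(
        cells[i + di][j + dj]
        for di in (-1, 0, 1)
        for dj in (-1, 0, 1)
        if (di, dj) != (0, 0)
        and 0 <= i + di < len(cells)
        and 0 <= j + dj < len(cells[0])
    )

# The centre cell of a vertical blinker sees exactly two live neighbours.
assert count_live_neighbours([[0, 1, 0], [0, 1, 0], [0, 1, 0]], 1, 1) == 2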
| 214
|
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
a : str = "docs/source/en/_toctree.yml"
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Dict = defaultdict(__magic_name__ )
for doc in model_doc:
counts[doc["local"]] += 1
UpperCAmelCase : List[Any] = [key for key, value in counts.items() if value > 1]
UpperCAmelCase : Dict = []
for duplicate_key in duplicates:
UpperCAmelCase : Union[str, Any] = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key} )
if len(__magic_name__ ) > 1:
raise ValueError(
F"{duplicate_key} is present several times in the documentation table of content at "
"`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
"others." )
# Only add this once
new_doc.append({"local": duplicate_key, "title": titles[0]} )
# Add the non-duplicate keys
new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1] )
# Sort
return sorted(__magic_name__ , key=lambda s : s["title"].lower() )
def lowercase ( __magic_name__=False ):
'''simple docstring'''
with open(__magic_name__ , encoding="utf-8" ) as f:
UpperCAmelCase : Any = yaml.safe_load(f.read() )
# Get to the API doc
UpperCAmelCase : Optional[int] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
UpperCAmelCase : Union[str, Any] = content[api_idx]["sections"]
# Then to the model doc
UpperCAmelCase : Any = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
UpperCAmelCase : str = api_doc[model_idx]["sections"]
UpperCAmelCase : Any = [(idx, section) for idx, section in enumerate(__magic_name__ ) if "sections" in section]
UpperCAmelCase : Optional[int] = False
for idx, modality_doc in modalities_docs:
UpperCAmelCase : int = modality_doc["sections"]
UpperCAmelCase : int = clean_model_doc_toc(__magic_name__ )
if old_modality_doc != new_modality_doc:
UpperCAmelCase : int = True
if overwrite:
UpperCAmelCase : Dict = new_modality_doc
if diff:
if overwrite:
UpperCAmelCase : Any = model_doc
UpperCAmelCase : Any = api_doc
with open(__magic_name__ , "w" , encoding="utf-8" ) as f:
f.write(yaml.dump(__magic_name__ , allow_unicode=__magic_name__ ) )
else:
raise ValueError(
"The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
a : Optional[Any] = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 311
| 0
|
import re
from ..utils import cached_file
# docstyle-ignore
_lowercase : str ="\nHuman: <<task>>\n\nAssistant: "
_lowercase : int ="huggingface-tools/default-prompts"
_lowercase : List[str] ={"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def lowerCAmelCase_ ( _lowercase : Optional[int] , _lowercase : Union[str, Any] , _lowercase : Dict="run") -> List[Any]:
"""simple docstring"""
if prompt_or_repo_id is None:
a__ : str = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("""\\s""" , _lowercase) is not None:
return prompt_or_repo_id
a__ : List[str] = cached_file(
_lowercase , PROMPT_FILES[mode] , repo_type="""dataset""" , user_agent={"""agent""": agent_name})
with open(_lowercase , """r""" , encoding="""utf-8""") as f:
return f.read()
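
# Hedged illustration (not part of the original file): the whitespace test
# above means any string containing a space, tab or newline is treated as a
# literal prompt rather than a repo id. The sample strings are invented.
assert re.search("\\s", "Translate this text") is not None  # literal prompt
assert re.search("\\s", "huggingface-tools/default-prompts") is None  # repo id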
| 170
|
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def lowercase ( __magic_name__ ):
'''simple docstring'''
for param in module.parameters():
UpperCAmelCase : Any = False
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : int = "cuda" if torch.cuda.is_available() else "cpu"
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
UpperCAmelCase : int = "mps"
if device == "mps":
print(
"WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
" with generations." )
return device
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : str = plt.imshow(__magic_name__ )
fig.axes.get_xaxis().set_visible(__magic_name__ )
fig.axes.get_yaxis().set_visible(__magic_name__ )
plt.show()
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : str = datetime.now()
UpperCAmelCase : Tuple = current_time.strftime("%H:%M:%S" )
return timestamp
| 311
| 0
|
lowerCamelCase : str = tuple[float, float, float]
lowerCamelCase : int = tuple[float, float, float]
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> Tuple:
snake_case : List[str] = end_pointa[0] - end_pointa[0]
snake_case : Optional[int] = end_pointa[1] - end_pointa[1]
snake_case : int = end_pointa[2] - end_pointa[2]
return (x, y, z)
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> int:
snake_case : Dict = ab[1] * ac[2] - ab[2] * ac[1] # *i
snake_case : Dict = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
snake_case : str = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> List[str]:
return tuple(round(x ,lowercase ) for x in vector ) == (0, 0, 0)
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase = 10 ) -> Optional[Any]:
snake_case : Any = create_vector(lowercase ,lowercase )
snake_case : Any = create_vector(lowercase ,lowercase )
return is_zero_vector(get_ad_vectors_cross(lowercase ,lowercase ) ,lowercase )
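
# Hedged, self-contained sketch (not part of the original file): the cross
# product of two parallel vectors is the zero vector, which is the property
# the coplanarity check above relies on. The helper name and inputs are
# invented.
def cross_sketch(a, b):
    return (
        a[1] * b[2] - a[2] * b[1],
        a[2] * b[0] - a[0] * b[2],
        a[0] * b[1] - a[1] * b[0],
    )

assert cross_sketch((1.0, 2.0, 3.0), (2.0, 4.0, 6.0)) == (0.0, 0.0, 0.0)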
| 124
|
'''simple docstring'''
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
a : str = getLogger(__name__)
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = 8 , __magic_name__ = 1024 , __magic_name__="val" , __magic_name__=None , __magic_name__=False , __magic_name__="summarization" , __magic_name__=None , __magic_name__=1 , __magic_name__ = None , __magic_name__="" , **__magic_name__ , ):
'''simple docstring'''
UpperCAmelCase : List[Any] = str(__magic_name__ )
assert local_rank is not None
torch.distributed.init_process_group(backend="nccl" , rank=__magic_name__ )
UpperCAmelCase : List[str] = Path(__magic_name__ )
UpperCAmelCase : Dict = save_dir.joinpath(F"rank_{local_rank}_output.json" )
torch.cuda.set_device(__magic_name__ )
UpperCAmelCase : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(__magic_name__ ).cuda()
if fpaa:
UpperCAmelCase : int = model.half()
# determine if we need to increase num_beams
use_task_specific_params(__magic_name__ , __magic_name__ ) # update config with task specific params
UpperCAmelCase : Dict = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
UpperCAmelCase : Optional[Any] = num_return_sequences
UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(__magic_name__ )
logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
if max_source_length is None:
UpperCAmelCase : Any = tokenizer.model_max_length
if prefix is None:
UpperCAmelCase : Tuple = prefix or getattr(model.config , "prefix" , "" ) or ""
UpperCAmelCase : Dict = SeqaSeqDataset(
__magic_name__ , __magic_name__ , __magic_name__ , max_target_length=1024 , type_path=__magic_name__ , n_obs=__magic_name__ , prefix=__magic_name__ , **__magic_name__ , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
UpperCAmelCase : int = ds.make_sortish_sampler(__magic_name__ , distributed=__magic_name__ , add_extra_examples=__magic_name__ , shuffle=__magic_name__ )
UpperCAmelCase : List[Any] = DataLoader(__magic_name__ , sampler=__magic_name__ , batch_size=__magic_name__ , collate_fn=ds.collate_fn )
UpperCAmelCase : Any = []
for batch in tqdm(__magic_name__ ):
UpperCAmelCase : List[Any] = model.generate(
input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=__magic_name__ , num_beams=__magic_name__ , **__magic_name__ , )
UpperCAmelCase : Optional[int] = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )
UpperCAmelCase : int = batch["ids"]
if num_return_sequences > 1:
UpperCAmelCase : List[Any] = chunks(__magic_name__ , __magic_name__ ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(__magic_name__ ):
results.append({"pred": pred, "id": ids[i].item()} )
save_json(__magic_name__ , __magic_name__ )
return results, sampler.num_replicas
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : str = argparse.ArgumentParser(
epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" )
parser.add_argument("--data_dir" , type=__magic_name__ , help="like cnn_dm/test.source" )
parser.add_argument(
"--model_name" , type=__magic_name__ , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , )
parser.add_argument("--save_dir" , type=__magic_name__ , help="where to save" , default="tmp_gen" )
parser.add_argument("--max_source_length" , type=__magic_name__ , default=__magic_name__ )
parser.add_argument(
"--type_path" , type=__magic_name__ , default="test" , help="which subset to evaluate typically train/val/test" )
parser.add_argument("--task" , type=__magic_name__ , default="summarization" , help="used for task_specific_params + metrics" )
parser.add_argument("--bs" , type=__magic_name__ , default=8 , required=__magic_name__ , help="batch size" )
parser.add_argument(
"--local_rank" , type=__magic_name__ , default=-1 , required=__magic_name__ , help="should be passed by distributed.launch" )
parser.add_argument(
"--n_obs" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ , help="How many observations. Defaults to all." )
parser.add_argument(
"--num_return_sequences" , type=__magic_name__ , default=1 , required=__magic_name__ , help="How many sequences to return" )
parser.add_argument(
"--sync_timeout" , type=__magic_name__ , default=600 , required=__magic_name__ , help="How long should master process wait for other processes to finish." , )
parser.add_argument("--src_lang" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ )
parser.add_argument("--tgt_lang" , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ )
parser.add_argument(
"--prefix" , type=__magic_name__ , required=__magic_name__ , default=__magic_name__ , help="will be added to the begininng of src examples" )
parser.add_argument("--fp16" , action="store_true" )
parser.add_argument("--debug" , action="store_true" )
UpperCAmelCase : Union[str, Any] = time.time()
UpperCAmelCase , UpperCAmelCase : Dict = parser.parse_known_args()
UpperCAmelCase : Tuple = parse_numeric_n_bool_cl_kwargs(__magic_name__ )
if generate_kwargs and args.local_rank <= 0:
print(F"parsed the following generate kwargs: {generate_kwargs}" )
UpperCAmelCase : Union[str, Any] = Path(args.save_dir + "_tmp" )
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) # this handles locking.
UpperCAmelCase : List[Any] = list(json_save_dir.glob("rank_*.json" ) )
if intermediate_files:
raise ValueError(F"Found files at {json_save_dir} please move or remove them." )
# In theory, a node could finish and save before another node hits this. If this happens, we can address it later.
UpperCAmelCase : Optional[Any] = {}
if args.src_lang is not None:
UpperCAmelCase : List[str] = args.src_lang
if args.tgt_lang is not None:
UpperCAmelCase : Dict = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=__magic_name__ )
UpperCAmelCase , UpperCAmelCase : str = eval_data_dir(
args.data_dir , __magic_name__ , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=__magic_name__ , **__magic_name__ , )
if args.local_rank <= 0:
UpperCAmelCase : List[str] = Path(args.save_dir )
save_dir.mkdir(exist_ok=__magic_name__ )
UpperCAmelCase : str = gather_results_from_each_node(__magic_name__ , __magic_name__ , args.sync_timeout )
UpperCAmelCase : Dict = combine_partial_results(__magic_name__ )
if args.num_return_sequences > 1:
UpperCAmelCase : int = save_dir.joinpath("pseudolabel_results.json" )
print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" )
save_json(__magic_name__ , __magic_name__ )
return
UpperCAmelCase : Dict = Path(args.data_dir ).joinpath(args.type_path + ".target" )
with open(__magic_name__ ) as f:
UpperCAmelCase : Dict = [x.rstrip() for x in f.readlines()][: len(__magic_name__ )]
# Calculate metrics, save metrics, and save _generations.txt
UpperCAmelCase : Optional[int] = "translation" in args.task
UpperCAmelCase : str = calculate_bleu if calc_bleu else calculate_rouge
UpperCAmelCase : Tuple = "bleu" if calc_bleu else "rouge"
UpperCAmelCase : Dict = score_fn(__magic_name__ , __magic_name__ )
UpperCAmelCase : Any = len(__magic_name__ )
UpperCAmelCase : Union[str, Any] = time.time() - start_time
UpperCAmelCase : Dict = round(runtime / metrics["n_obs"] , 4 )
UpperCAmelCase : Optional[Any] = num_replicas
# TODO(@stas00): add whatever metadata to metrics
UpperCAmelCase : Dict = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" )
save_json(__magic_name__ , __magic_name__ , indent=__magic_name__ )
print(__magic_name__ )
write_txt_file(__magic_name__ , save_dir.joinpath(F"{args.type_path}_generations.txt" ) )
if args.debug:
write_txt_file(__magic_name__ , save_dir.joinpath(F"{args.type_path}.target" ) )
else:
shutil.rmtree(__magic_name__ )
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Tuple = []
for partial_result in partial_results:
records.extend(__magic_name__ )
UpperCAmelCase : Optional[Any] = sorted(__magic_name__ , key=lambda x : x["id"] )
UpperCAmelCase : List[Any] = [x["pred"] for x in records]
return preds
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Dict = time.time()
logger.info("waiting for all nodes to finish" )
UpperCAmelCase : Union[str, Any] = None
while (time.time() - start_wait) < timeout:
UpperCAmelCase : Dict = list(save_dir.glob("rank_*.json" ) )
if len(__magic_name__ ) < num_replicas:
continue
try:
# make sure all json files are fully saved
UpperCAmelCase : List[str] = lmap(__magic_name__ , __magic_name__ )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("Rank 0 gave up on waiting for other processes" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
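
# Hedged, self-contained sketch (not part of the original file): how the
# `chunks` helper imported above is expected to behave -- it regroups the flat
# list of batch_size * num_return_sequences predictions into per-example
# groups. The helper name and sample data are invented.
def chunks_sketch(flat, n):
    return [flat[i : i + n] for i in range(0, len(flat), n)]

assert chunks_sketch(["p1", "p2", "p3", "p4"], 2) == [["p1", "p2"], ["p3", "p4"]]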
| 311
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {
"configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegatronBertForCausalLM",
"MegatronBertForMaskedLM",
"MegatronBertForMultipleChoice",
"MegatronBertForNextSentencePrediction",
"MegatronBertForPreTraining",
"MegatronBertForQuestionAnswering",
"MegatronBertForSequenceClassification",
"MegatronBertForTokenClassification",
"MegatronBertModel",
"MegatronBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
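# Hedged note (not part of the original file): _LazyModule defers the heavy
# torch-backed imports until an attribute is first accessed, so importing this
# package stays cheap when only the configuration is needed.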
| 205
|
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
a : List[str] = logging.get_logger(__name__)
a : Optional[Any] = ["model.decoder.embed_positions.weights"]
def lowercase ( __magic_name__ ):
'''simple docstring'''
if "emb" in name:
UpperCAmelCase : str = name.replace("emb" , "model.decoder.embed_tokens" )
if "transformer" in name:
UpperCAmelCase : List[str] = name.replace("transformer" , "model.decoder" )
if "cross_attention" in name:
UpperCAmelCase : int = name.replace("cross_attention" , "encoder_attn" )
if "linear1" in name:
UpperCAmelCase : List[Any] = name.replace("linear1" , "fc1" )
if "linear2" in name:
UpperCAmelCase : int = name.replace("linear2" , "fc2" )
if "norm1" in name:
UpperCAmelCase : Dict = name.replace("norm1" , "self_attn_layer_norm" )
if "norm_cross" in name:
UpperCAmelCase : Any = name.replace("norm_cross" , "encoder_attn_layer_norm" )
if "norm2" in name:
UpperCAmelCase : Union[str, Any] = name.replace("norm2" , "final_layer_norm" )
if "out_norm" in name:
UpperCAmelCase : Dict = name.replace("out_norm" , "model.decoder.layer_norm" )
if "linears" in name:
UpperCAmelCase : List[Any] = name.replace("linears" , "lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
UpperCAmelCase : Any = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
return name
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Any = list(state_dict.keys() )
UpperCAmelCase : List[Any] = {}
for key in keys:
UpperCAmelCase : Any = state_dict.pop(__magic_name__ )
UpperCAmelCase : str = rename_keys(__magic_name__ )
if "in_proj_weight" in key:
# split fused qkv proj
UpperCAmelCase : Optional[int] = val[:hidden_size, :]
UpperCAmelCase : Optional[Any] = val[hidden_size : 2 * hidden_size, :]
UpperCAmelCase : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
UpperCAmelCase : str = val
else:
UpperCAmelCase : int = val
return state_dict, enc_dec_proj_state_dict
def lowercase ( __magic_name__ ):
'''simple docstring'''
if checkpoint == "small":
# default config values
UpperCAmelCase : List[Any] = 1024
UpperCAmelCase : Tuple = 24
UpperCAmelCase : Union[str, Any] = 16
elif checkpoint == "medium":
UpperCAmelCase : List[Any] = 1536
UpperCAmelCase : Optional[Any] = 48
UpperCAmelCase : List[str] = 24
elif checkpoint == "large":
UpperCAmelCase : List[Any] = 2048
UpperCAmelCase : str = 48
UpperCAmelCase : Optional[Any] = 32
else:
raise ValueError(F"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." )
UpperCAmelCase : Tuple = MusicgenDecoderConfig(
hidden_size=__magic_name__ , ffn_dim=hidden_size * 4 , num_hidden_layers=__magic_name__ , num_attention_heads=__magic_name__ , )
return config
@torch.no_grad()
def lowercase ( __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__="cpu" ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = MusicGen.get_pretrained(__magic_name__ , device=__magic_name__ )
UpperCAmelCase : List[str] = decoder_config_from_checkpoint(__magic_name__ )
UpperCAmelCase : Dict = fairseq_model.lm.state_dict()
UpperCAmelCase , UpperCAmelCase : List[str] = rename_state_dict(
__magic_name__ , hidden_size=decoder_config.hidden_size )
UpperCAmelCase : Any = TaEncoderModel.from_pretrained("t5-base" )
UpperCAmelCase : Any = EncodecModel.from_pretrained("facebook/encodec_32khz" )
UpperCAmelCase : int = MusicgenForCausalLM(__magic_name__ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
UpperCAmelCase , UpperCAmelCase : Optional[int] = decoder.load_state_dict(__magic_name__ , strict=__magic_name__ )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(__magic_name__ )
if len(__magic_name__ ) > 0:
raise ValueError(F"Missing key(s) in state_dict: {missing_keys}" )
if len(__magic_name__ ) > 0:
raise ValueError(F"Unexpected key(s) in state_dict: {unexpected_keys}" )
# init the composite model
UpperCAmelCase : List[Any] = MusicgenForConditionalGeneration(text_encoder=__magic_name__ , audio_encoder=__magic_name__ , decoder=__magic_name__ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(__magic_name__ )
# check we can do a forward pass
UpperCAmelCase : Union[str, Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
UpperCAmelCase : Optional[Any] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
UpperCAmelCase : str = model(input_ids=__magic_name__ , decoder_input_ids=__magic_name__ ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
UpperCAmelCase : Dict = AutoTokenizer.from_pretrained("t5-base" )
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
UpperCAmelCase : Dict = MusicgenProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ )
# set the appropriate bos/pad token ids
UpperCAmelCase : List[Any] = 2048
UpperCAmelCase : Tuple = 2048
# set other default generation config params
UpperCAmelCase : Tuple = int(30 * audio_encoder.config.frame_rate )
UpperCAmelCase : str = True
UpperCAmelCase : Tuple = 3.0
if pytorch_dump_folder is not None:
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
logger.info(F"Saving model {checkpoint} to {pytorch_dump_folder}" )
model.save_pretrained(__magic_name__ )
processor.save_pretrained(__magic_name__ )
if repo_id:
logger.info(F"Pushing model {checkpoint} to {repo_id}" )
model.push_to_hub(__magic_name__ )
processor.push_to_hub(__magic_name__ )
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
a : int = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 311
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_A = {
"configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongT5EncoderModel",
"LongT5ForConditionalGeneration",
"LongT5Model",
"LongT5PreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"FlaxLongT5ForConditionalGeneration",
"FlaxLongT5Model",
"FlaxLongT5PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 242
|
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[str] = inspect.getfile(accelerate.test_utils )
UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
UpperCAmelCase : Optional[int] = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] )
UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
print(f"Found {torch.cuda.device_count()} devices." )
UpperCAmelCase : Any = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
print(f"Found {torch.cuda.device_count()} devices." )
UpperCAmelCase : Tuple = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
print(f"Command: {cmd}" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
print(f"Found {torch.cuda.device_count()} devices, using 2 devices only" )
UpperCAmelCase : str = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
if __name__ == "__main__":
a : Union[str, Any] = Accelerator()
a : str = (accelerator.state.process_index + 2, 10)
a : List[str] = torch.randint(0, 10, shape).to(accelerator.device)
a : Optional[int] = ""
a : int = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
a : List[Any] = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
a : List[str] = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
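# Illustration of the invariant checked above (values are hypothetical): in a
# 2-process run, rank 0 holds a (2, 10) tensor and rank 1 a (3, 10) tensor, so
# pad_across_processes must return a (3, 10) tensor on every rank -- that is,
# num_processes + 1 rows, with the extra rows zero-filled.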
"""simple docstring"""
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    # Tie the LM head to the token embedding matrix.
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size, max_position_embeddings=args.max_target_positions, num_layers=args.decoder_layers, attention_heads=args.decoder_attention_heads, ffn_dim=args.decoder_ffn_embed_dim, d_model=args.decoder_embed_dim, layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="gelu", scale_embedding=not args.no_scale_embedding, tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
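# Example invocation (paths are hypothetical):
#   python convert_xglm_original_ckpt_to_trfms.py /path/to/xglm/model.pt ./xglm-converted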
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Dummy stand-in so references to `Image` do not fail when vision deps are missing."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )
        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_small_model_tf(self):
        pass
    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )
        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png", candidate_labels=["cat", "remote", "couch"], threshold=0.64
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
] , )
        outputs = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.64 , )
self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
]
] , )
    @require_torch
    @slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote", "couch"]
        )
self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
] , )
        outputs = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
] , )
self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
] , )
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_large_model_tf(self):
        pass
    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote", "couch"], threshold=threshold
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
] , )
    @require_torch
    @slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote", "couch"], top_k=top_k
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
] , )
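# Note on the two knobs exercised above: `threshold` drops detections whose
# score falls below it, while `top_k` additionally truncates the score-sorted
# result list, which is why the last test expects exactly two boxes.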
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
_DESCRIPTION = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn	Type	Description\n1	Document ID	This is a variation on the document filename\n2	Part number	Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3	Word number\n4	Word itself	This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5	Part-of-Speech\n6	Parse bit	This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7	Predicate lemma	The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8	Predicate Frameset ID	This is the PropBank frameset ID of the predicate in Column 7.\n9	Word sense	This is the word sense of the word in Column 3.\n10	Speaker/Author	This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11	Named Entities	These columns identifies the spans representing various named entities.\n12:N	Predicate Arguments	There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN	Coreference	Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
_KWARGS_DESCRIPTION = "\nCalculates coreference evaluation metrics.\nArgs:\n    predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n        Each prediction is a word with its annotations as a string made of columns joined with spaces.\n        Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n        See the details on the format in the description of the metric.\n    references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n        Each reference is a word with its annotations as a string made of columns joined with spaces.\n        Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n        See the details on the format in the description of the metric.\n    keep_singletons: After extracting all mentions of key or system files,\n        mentions whose corresponding coreference chain is of size one,\n        are considered as singletons. The default evaluation mode will include\n        singletons in evaluations if they are included in the key or the system files.\n        By setting 'keep_singletons=False', all singletons in the key and system files\n        will be excluded from the evaluation.\n    NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n        leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n    min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n        Minimum spans are determined using the MINA algorithm.\n\nReturns:\n    'mentions': mentions\n    'muc': MUC metric [Vilain et al, 1995]\n    'bcub': B-cubed [Bagga and Baldwin, 1998]\n    'ceafe': CEAFe [Luo et al., 2005]\n    'lea': LEA [Moosavi and Strube, 2016]\n    'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n    >>> coval = datasets.load_metric('coval')\n    >>> words = ['bc/cctv/00/cctv_0005   0   0       Thank   VBP  (TOP(S(VP*    thank  01   1    Xu_li  *           (V*)        *       -',\n    ... 'bc/cctv/00/cctv_0005   0   1         you   PRP        (NP*)      -    -   -    Xu_li  *        (ARG1*)   (ARG0*)   (116)',\n    ... 'bc/cctv/00/cctv_0005   0   2    everyone    NN        (NP*)      -    -   -    Xu_li  *    (ARGM-DIS*)        *    (116)',\n    ... 'bc/cctv/00/cctv_0005   0   3         for    IN        (PP*       -    -   -    Xu_li  *         (ARG2*         *       -',\n    ... 'bc/cctv/00/cctv_0005   0   4    watching   VBG   (S(VP*))))   watch  01   1    Xu_li  *             *)      (V*)       -',\n    ... 'bc/cctv/00/cctv_0005   0   5           .     .          *))      -    -   -    Xu_li  *             *         *       -']\n    >>> references = [words]\n    >>> predictions = [words]\n    >>> results = coval.compute(predictions=predictions, references=references)\n    >>> print(results) # doctest:+ELLIPSIS\n    {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references, sys_lines=predictions, metrics=metrics, NP_only=NP_only, remove_nested=remove_nested, keep_singletons=keep_singletons, min_span=min_span,
        )

        return score
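# Minimal usage sketch (mirrors the doctest in _KWARGS_DESCRIPTION above):
#   coval = datasets.load_metric("coval")
#   results = coval.compute(predictions=predictions, references=references)
#   print(results["conll_score"])  # 100.0 when key and system files are identical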
'''simple docstring'''
def twos_complement(number: int) -> str:
    """Return the two's complement binary string of a non-positive integer."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name):
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name):
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # split the fused qkv projection into separate query/key/value tensors
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--yolos_name',
default='yolos_s_200_pre',
type=str,
help=(
'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
),
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
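# Example invocation (checkpoint path is hypothetical):
#   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#       --checkpoint_path ./yolos_s_200_pre.pth --pytorch_dump_folder_path ./yolos-small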
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """Return the Euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classify `point` by majority vote among its k nearest training points."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
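# Worked distance example: euclidean_distance([0, 0], [3, 4]) == 5.0 (the 3-4-5
# triangle); `classifier` ranks every training point by this distance and takes
# a majority vote over the k == 5 nearest neighbours.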
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
"""simple docstring"""
import string
def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the decryption under every key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
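# Quick sanity check (hypothetical ciphertext): "JGNNQ" is "HELLO" shifted by 2,
# so decrypt("JGNNQ") prints "HELLO" on the line for Key #2.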
'''simple docstring'''
def is_power_of_two(number: int) -> bool:
    """Return True if `number` is a power of two, using the n & (n - 1) trick."""
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
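# Why the bit trick works: a power of two has exactly one set bit, so
# subtracting 1 clears that bit and sets all lower ones, e.g.
# 8 & 7 == 0b1000 & 0b0111 == 0, while 6 & 5 == 0b110 & 0b101 == 0b100 != 0.
# (Note that 0 also passes this test.)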
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")


class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
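# The 500-status mock above is the standard pattern for testing offline
# behaviour: `from_pretrained` must fall back to the locally cached files when
# every request to the Hub fails.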
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
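# `unwrap_and_save_reload_schedule` round-trips the scheduler through
# state_dict()/load_state_dict() halfway through stepping, so comparing its LR
# trace with `unwrap_schedule` verifies that checkpointing does not perturb
# the schedule.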
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w], lr=1e-2, eps=(1e-30, 1e-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, relative_step=False, scale_parameter=False, warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1, expected_learning_rates, tol=1e-2, msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    """Callable wrapper that keeps LambdaLR-based schedules picklable."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
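# Wrapping each lr_lambda in a module-level callable object keeps LambdaLR-based
# schedulers picklable, which is exactly what the save-and-reload branch of
# test_schedulers exercises.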
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=50, initializer_range=0.02, use_labels=True, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, is_decoder=False, initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))


@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
'''simple docstring'''
import jax.numpy as jnp

from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id):
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
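# Sanity check of the right shift (hypothetical ids, with pad_token_id=0 and
# decoder_start_token_id=0): shift_tokens_right(jnp.array([[5, 6, 7]]), 0, 0)
# returns [[0, 5, 6]], and any -100 positions coming from label masking are
# re-filled with pad_token_id.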
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
snake_case_ = "bert-base-cased"
snake_case_ = "fp16"
snake_case_ = "bf16"
snake_case_ = [FPaa, BFaa]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase ):
    def setUp(self):
        super().setUp()

        self.dist_env = dict(
            ACCELERATE_USE_FSDP='true' , MASTER_ADDR='localhost' , MASTER_PORT='10999' , RANK='0' , LOCAL_RANK='0' , WORLD_SIZE='1' , )

    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f"""{i + 1}"""
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1))

    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1))

    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)

    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
            if policy == "NO_WRAP":
                self.assertIsNone(fsdp_plugin.auto_wrap_policy)
            else:
                self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue('Could not find the transformer layer class to wrap in the model.' in str(cm.exception))

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)

    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype , reduce_dtype=dtype , buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler , ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)

    def test_cpu_offload(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=flag))
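# Minimal standalone sketch (added for illustration; mirrors the env-var
# pattern tested above -- the variable names are the ones accelerate reads, to
# the best of my knowledge) showing how the plugin is configured purely from
# the environment:
def _demo_fsdp_plugin_from_env():
    env = dict(ACCELERATE_USE_FSDP="true", FSDP_SHARDING_STRATEGY="1",
               FSDP_OFFLOAD_PARAMS="true")
    with mockenv_context(**env):
        plugin = FullyShardedDataParallelPlugin()
    return plugin  # FULL_SHARD sharding with parameter CPU offload enabled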
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase ):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'external_deps'])
    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder , 'test_performance.py')
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
                    cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""")
                    break

            if "fp32" in config:
                cmd_config.append('--mixed_precision=no')
            else:
                cmd_config.append('--mixed_precision=fp16')

            if "cpu_offload" in config:
                cmd_config.append('--fsdp_offload_params=True')

            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""")
                    break

            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer')
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append('--fsdp_min_num_params=2000')

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"""--output_dir={self.tmpdir}""",
                    f"""--performance_lower_bound={self.performance_lower_bound}""",
                ])
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config , env=os.environ.copy())
    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder , 'test_checkpointing.py')
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""")
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f"""--fsdp_state_dict_type={state_dict_type}""")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"""--output_dir={self.tmpdir}""",
                        '--partial_train_epoch=1',
                    ])
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config , env=os.environ.copy())

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir , 'epoch_0')
                cmd_config.extend(
                    [
                        f"""--resume_from_checkpoint={resume_from_checkpoint}""",
                    ])
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config , env=os.environ.copy())
    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder , 'test_peak_memory_usage.py')
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(['--mixed_precision=fp16'])
            else:
                cmd_config.extend(['--mixed_precision=no'])

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(['--use_fsdp'])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                    if strategy.lower() in spec:
                        cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""")
                        break

                if "cpu_offload" in spec:
                    cmd_config.append('--fsdp_offload_params=True')

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""")
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer')
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append('--fsdp_min_num_params=2000')

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"""--output_dir={self.tmpdir}""",
                    f"""--peak_memory_upper_bound={peak_mem_upper_bound}""",
                    f"""--n_train={self.n_train}""",
                    f"""--n_val={self.n_val}""",
                ])
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config , env=os.environ.copy())
'''simple docstring'''
from jiwer import compute_measures
import datasets
a : List[Any] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
a : str = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
a : Union[str, Any] = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric ):
"""simple docstring"""
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] , )
    def _compute( self , predictions=None , references=None , concatenate_texts=False ):
        '''simple docstring'''
        if concatenate_texts:
            return compute_measures(references , predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references ):
                measures = compute_measures(reference , prediction )
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
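# Worked example (added for illustration; matches the docstring above). Note
# that the iterative branch pools error counts across pairs before dividing,
# which weights each reference by its length rather than averaging
# per-sentence WERs:
def _demo_wer():
    predictions = ["this is the prediction", "there is an other sample"]
    references = ["this is the reference", "there is another one"]
    incorrect, total = 0, 0
    for prediction, reference in zip(predictions, references):
        measures = compute_measures(reference, prediction)
        incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
        total += measures["substitutions"] + measures["deletions"] + measures["hits"]
    return incorrect / total  # -> 0.5 for these lists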
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer ):
    """simple docstring"""
    def __init__( self , *args , eval_examples=None , post_process_function=None , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate( self , eval_dataset=None , eval_examples=None , ignore_keys=None , metric_key_prefix = "eval" ):
        """simple docstring"""
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
            start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples , eval_dataset , output.predictions )
            metrics = self.compute_metrics(eval_preds )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(F'''{metric_key_prefix}_''' ):
                    metrics[F'''{metric_key_prefix}_{key}'''] = metrics.pop(key )
            metrics.update(output.metrics )
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics )
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        self.control = self.callback_handler.on_evaluate(self.args , self.state , self.control , metrics )
        return metrics
    def predict( self , predict_dataset , predict_examples , ignore_keys=None , metric_key_prefix = "test" ):
        """simple docstring"""
        predict_dataloader = self.get_test_dataloader(predict_dataset )
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
            start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples , predict_dataset , output.predictions , """predict""" )
        metrics = self.compute_metrics(predictions )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(F'''{metric_key_prefix}_''' ):
                metrics[F'''{metric_key_prefix}_{key}'''] = metrics.pop(key )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=metrics )
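# Typical wiring for this subclass (added for illustration;
# `postprocess_qa_predictions` is a placeholder for the usual squad-style
# post-processing helper, which is not defined in this file):
#
# trainer = QuestionAnsweringTrainer(
#     model=model,
#     args=training_args,
#     train_dataset=train_dataset,
#     eval_dataset=eval_dataset,
#     eval_examples=eval_examples,
#     post_process_function=postprocess_qa_predictions,
#     compute_metrics=compute_metrics,
# )
# metrics = trainer.evaluate()  # runs the loop above, then scores EM/F1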
'''simple docstring'''
from functools import lru_cache
def unique_prime_factors( n ):
    '''simple docstring'''
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i )
    if n > 1:
        factors.add(n )
    return factors


@lru_cache
def upf_len( num ):
    '''simple docstring'''
    return len(unique_prime_factors(num ) )


def equality( iterable ):
    '''simple docstring'''
    return len(set(iterable ) ) in (0, 1)


def run( n ):
    '''simple docstring'''
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n )]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x ) for x in group]
        checker.append(n )
        # If all numbers in the list are equal, return the group variable.
        if equality(checker ):
            return group
        # Increment our base variable by 1
        base += 1


def solution( n = 4 ):
    '''simple docstring'''
    results = run(n )
    return results[0] if len(results ) else None
if __name__ == "__main__":
print(solution())
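# Worked check (added for illustration): for the default n=4 the first
# qualifying run is 134043..134046, each having exactly four distinct prime
# factors (Project Euler problem 47). For instance:
#   unique_prime_factors(134043) == {3, 7, 13, 491}
#   upf_len(134043) == 4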
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=lowercase__ ):
"""simple docstring"""
_snake_case = ["sentencepiece"]
def __init__( self , *A , **A ) -> List[str]:
requires_backends(self , ["""sentencepiece"""] )
class __lowercase (metaclass=lowercase__ ):
"""simple docstring"""
_snake_case = ["sentencepiece"]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""sentencepiece"""] )
class __lowercase (metaclass=lowercase__ ):
"""simple docstring"""
_snake_case = ["sentencepiece"]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""sentencepiece"""] )
class __lowercase (metaclass=lowercase__ ):
"""simple docstring"""
_snake_case = ["sentencepiece"]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""sentencepiece"""] )
class __lowercase (metaclass=lowercase__ ):
"""simple docstring"""
_snake_case = ["sentencepiece"]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""sentencepiece"""] )
class __lowercase (metaclass=lowercase__ ):
"""simple docstring"""
_snake_case = ["sentencepiece"]
def __init__( self , *A , **A ) -> int:
requires_backends(self , ["""sentencepiece"""] )
class __lowercase (metaclass=lowercase__ ):
"""simple docstring"""
_snake_case = ["sentencepiece"]
def __init__( self , *A , **A ) -> Dict:
requires_backends(self , ["""sentencepiece"""] )
class __lowercase (metaclass=lowercase__ ):
"""simple docstring"""
_snake_case = ["sentencepiece"]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""sentencepiece"""] )
class __lowercase (metaclass=lowercase__ ):
"""simple docstring"""
_snake_case = ["sentencepiece"]
def __init__( self , *A , **A ) -> Union[str, Any]:
requires_backends(self , ["""sentencepiece"""] )
class __lowercase (metaclass=lowercase__ ):
"""simple docstring"""
_snake_case = ["sentencepiece"]
def __init__( self , *A , **A ) -> Any:
requires_backends(self , ["""sentencepiece"""] )
class __lowercase (metaclass=lowercase__ ):
"""simple docstring"""
_snake_case = ["sentencepiece"]
def __init__( self , *A , **A ) -> Union[str, Any]:
requires_backends(self , ["""sentencepiece"""] )
class __lowercase (metaclass=lowercase__ ):
"""simple docstring"""
_snake_case = ["sentencepiece"]
def __init__( self , *A , **A ) -> Any:
requires_backends(self , ["""sentencepiece"""] )
class __lowercase (metaclass=lowercase__ ):
"""simple docstring"""
_snake_case = ["sentencepiece"]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""sentencepiece"""] )
class __lowercase (metaclass=lowercase__ ):
"""simple docstring"""
_snake_case = ["sentencepiece"]
def __init__( self , *A , **A ) -> List[str]:
requires_backends(self , ["""sentencepiece"""] )
class __lowercase (metaclass=lowercase__ ):
"""simple docstring"""
_snake_case = ["sentencepiece"]
def __init__( self , *A , **A ) -> Optional[int]:
requires_backends(self , ["""sentencepiece"""] )
class __lowercase (metaclass=lowercase__ ):
"""simple docstring"""
_snake_case = ["sentencepiece"]
def __init__( self , *A , **A ) -> Optional[int]:
requires_backends(self , ["""sentencepiece"""] )
class __lowercase (metaclass=lowercase__ ):
"""simple docstring"""
_snake_case = ["sentencepiece"]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""sentencepiece"""] )
class __lowercase (metaclass=lowercase__ ):
"""simple docstring"""
_snake_case = ["sentencepiece"]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""sentencepiece"""] )
class __lowercase (metaclass=lowercase__ ):
"""simple docstring"""
_snake_case = ["sentencepiece"]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""sentencepiece"""] )
class __lowercase (metaclass=lowercase__ ):
"""simple docstring"""
_snake_case = ["sentencepiece"]
def __init__( self , *A , **A ) -> Any:
requires_backends(self , ["""sentencepiece"""] )
class __lowercase (metaclass=lowercase__ ):
"""simple docstring"""
_snake_case = ["sentencepiece"]
def __init__( self , *A , **A ) -> Optional[int]:
requires_backends(self , ["""sentencepiece"""] )
class __lowercase (metaclass=lowercase__ ):
"""simple docstring"""
_snake_case = ["sentencepiece"]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""sentencepiece"""] )
class __lowercase (metaclass=lowercase__ ):
"""simple docstring"""
_snake_case = ["sentencepiece"]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""sentencepiece"""] )
class __lowercase (metaclass=lowercase__ ):
"""simple docstring"""
_snake_case = ["sentencepiece"]
def __init__( self , *A , **A ) -> Union[str, Any]:
requires_backends(self , ["""sentencepiece"""] )
class __lowercase (metaclass=lowercase__ ):
"""simple docstring"""
_snake_case = ["sentencepiece"]
def __init__( self , *A , **A ) -> Dict:
requires_backends(self , ["""sentencepiece"""] )
class __lowercase (metaclass=lowercase__ ):
"""simple docstring"""
_snake_case = ["sentencepiece"]
def __init__( self , *A , **A ) -> List[str]:
requires_backends(self , ["""sentencepiece"""] )
class __lowercase (metaclass=lowercase__ ):
"""simple docstring"""
_snake_case = ["sentencepiece"]
def __init__( self , *A , **A ) -> Union[str, Any]:
requires_backends(self , ["""sentencepiece"""] )
class __lowercase (metaclass=lowercase__ ):
"""simple docstring"""
_snake_case = ["sentencepiece"]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""sentencepiece"""] )
class __lowercase (metaclass=lowercase__ ):
"""simple docstring"""
_snake_case = ["sentencepiece"]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""sentencepiece"""] )
class __lowercase (metaclass=lowercase__ ):
"""simple docstring"""
_snake_case = ["sentencepiece"]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""sentencepiece"""] )
class __lowercase (metaclass=lowercase__ ):
"""simple docstring"""
_snake_case = ["sentencepiece"]
def __init__( self , *A , **A ) -> Dict:
requires_backends(self , ["""sentencepiece"""] )
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
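# What the lazy module above buys us (explanatory note added; not original
# code): `import transformers.models.encodec` stays cheap because _LazyModule
# defers the torch-backed imports in `_import_structure`; `EncodecModel` is
# only materialized on first attribute access, and when torch is missing the
# try/except keeps the modeling entries out of the structure entirely.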
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase ):
    def test_cpu( self ):
        '''simple docstring'''
        debug_launcher(test_script.main )

    def test_ops( self ):
        '''simple docstring'''
        debug_launcher(test_ops.main )
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(R"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(R"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(R"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(R"(_{2,})")

_split_re = R"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = R"<>:/\|?*"


def camelcase_to_snakecase( name ):
    '''simple docstring'''
    name = _uppercase_uppercase_re.sub(R"\1_\2" , name )
    name = _lowercase_uppercase_re.sub(R"\1_\2" , name )
    return name.lower()


def snakecase_to_camelcase( name ):
    '''simple docstring'''
    name = _single_underscore_re.split(name )
    name = [_multiple_underscores_re.split(n ) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name ) if n != "" )


def filename_prefix_for_name( name ):
    '''simple docstring'''
    if os.path.basename(name ) != name:
        raise ValueError(F"Should be a dataset name, not a path: {name}" )
    return camelcase_to_snakecase(name )


def filename_prefix_for_split( name , split ):
    '''simple docstring'''
    if os.path.basename(name ) != name:
        raise ValueError(F"Should be a dataset name, not a path: {name}" )
    if not re.match(_split_re , split ):
        raise ValueError(F"Split name should match '{_split_re}' but got '{split}'." )
    return F"{filename_prefix_for_name(name )}-{split}"


def filepattern_for_dataset_split( dataset_name , split , data_dir , filetype_suffix=None ):
    '''simple docstring'''
    prefix = filename_prefix_for_split(dataset_name , split )
    if filetype_suffix:
        prefix += F".{filetype_suffix}"
    filepath = os.path.join(data_dir , prefix )
    return F"{filepath}*"


def filenames_for_dataset_split( path , dataset_name , split , filetype_suffix=None , shard_lengths=None ):
    '''simple docstring'''
    prefix = filename_prefix_for_split(dataset_name , split )
    prefix = os.path.join(path , prefix )
    if shard_lengths:
        num_shards = len(shard_lengths )
        filenames = [F"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards )]
        if filetype_suffix:
            filenames = [filename + F".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += F".{filetype_suffix}"
        return [filename]
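# Examples of the helpers above (added for illustration):
#   camelcase_to_snakecase("SomeDatasetName")    -> "some_dataset_name"
#   snakecase_to_camelcase("some_dataset_name")  -> "SomeDatasetName"
#   filename_prefix_for_split("squad", "train")  -> "squad-train"
#   filenames_for_dataset_split("/data", "squad", "train",
#                               filetype_suffix="arrow", shard_lengths=[10, 10])
#       -> ["/data/squad-train-00000-of-00002.arrow",
#           "/data/squad-train-00001-of-00002.arrow"]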
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="""%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s""",
datefmt="""%Y-%m-%d %H:%M:%S""",
level=os.environ.get("""LOGLEVEL""", """INFO""").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""" )
    parser.add_argument(
        """--validation_file""" , type=str , default=None , help="""A csv or a json file containing the validation data.""" )
    parser.add_argument(
        """--max_length""" , type=int , default=5 , help="""The maximum total input sequence length after tokenization.""" , )
    parser.add_argument(
        """--num_beams""" , type=int , default=None , help=(
            """Number of beams to use for evaluation. This argument will be """
            """passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
        ) , )
    parser.add_argument(
        """--model_name_or_path""" , type=str , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=True , )
    parser.add_argument(
        """--config_name""" , type=str , default=None , help="""Pretrained config name or path if not the same as model_name""" , )
    parser.add_argument(
        """--device""" , type=str , default="""cpu""" , help="""Device where the model will be run""" , )
    parser.add_argument("""--output_file_path""" , type=str , default=None , help="""Where to store the final ONNX file.""" )
    args = parser.parse_args()
    return args
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase="cpu" ) -> Optional[Any]:
lowerCAmelCase__ : str = model_dict[model_name].from_pretrained(__UpperCAmelCase ).to(__UpperCAmelCase )
lowerCAmelCase__ : Tuple = tokenizer_dict[model_name].from_pretrained(__UpperCAmelCase )
if model_name in ["facebook/bart-base"]:
lowerCAmelCase__ : Optional[int] = 0
lowerCAmelCase__ : List[str] = None
lowerCAmelCase__ : Optional[int] = 0
return huggingface_model, tokenizer
def export_and_validate_model( model , tokenizer , onnx_file_path , num_beams , max_length ):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model ) )
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors="""pt""" ).to(model.device )
        summary_ids = model.generate(
            inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , num_beams=num_beams , max_length=max_length , early_stopping=True , decoder_start_token_id=model.config.decoder_start_token_id , )
        torch.onnx.export(
            bart_script_model , (
                inputs["""input_ids"""],
                inputs["""attention_mask"""],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ) , onnx_file_path , opset_version=14 , input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] , output_names=["""output_ids"""] , dynamic_axes={
                """input_ids""": {0: """batch""", 1: """seq"""},
                """output_ids""": {0: """batch""", 1: """seq_out"""},
            } , example_outputs=summary_ids , )
        logger.info("""Model exported to {}""".format(onnx_file_path ) )
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path ) )
        logger.info("""Deduplicated and optimized model written to {}""".format(new_onnx_file_path ) )
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path )
        ort_out = ort_sess.run(
            None , {
                """input_ids""": inputs["""input_ids"""].cpu().numpy(),
                """attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
                """num_beams""": np.array(num_beams ),
                """max_length""": np.array(max_length ),
                """decoder_start_token_id""": np.array(model.config.decoder_start_token_id ),
            } , )
        np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
        logger.info("""Model outputs from torch and ONNX Runtime are similar.""" )
        logger.info("""Success.""" )
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
    logger.setLevel(logging.INFO )
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device )
    model, tokenizer = load_model_tokenizer(args.model_name_or_path , device )
    if model.config.decoder_start_token_id is None:
        raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""" )
    model.to(device )
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"
    logger.info("""Exporting model to ONNX""" )
    export_and_validate_model(model , tokenizer , output_name , num_beams , max_length )
if __name__ == "__main__":
main()
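# Typical invocation (added for illustration; the script filename is
# hypothetical, but the flags match the argparse definitions above):
#
#   python run_onnx_exporter.py \
#       --model_name_or_path facebook/bart-base \
#       --num_beams 4 --max_length 5 \
#       --output_file_path BART.onnx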
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = B"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 15_81
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 15_17
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 15_70
    _globals["_NORMALIZERSPEC"]._serialized_start = 15_84
    _globals["_NORMALIZERSPEC"]._serialized_end = 17_93
    _globals["_SELFTESTDATA"]._serialized_start = 17_95
    _globals["_SELFTESTDATA"]._serialized_end = 19_16
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 18_64
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 19_05
    _globals["_MODELPROTO"]._serialized_start = 19_19
    _globals["_MODELPROTO"]._serialized_end = 24_29
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 22_08
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 24_18
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 23_23
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 24_07
# @@protoc_insertion_point(module_scope)
"""simple docstring"""
from __future__ import annotations
def prime_factors( n ):
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i )
    if n > 1:
        factors.append(n )
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import argparse
import copy
def generate_neighbours( path ):
    '''simple docstring'''
    dict_of_neighbours = {}
    with open(path ) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]] )
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]] )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]] )
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]] )
    return dict_of_neighbours
def generate_first_solution( path , dict_of_neighbours ):
    '''simple docstring'''
    with open(path ) as f:
        start_node = f.read(1 )
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 1_0000
        for k in dict_of_neighbours[visiting]:
            if int(k[1] ) < int(minim ) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting )
        distance_of_first_solution = distance_of_first_solution + int(minim )
        visiting = best_node
    first_solution.append(end_node )
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1] )
        - 1_0000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood( solution , dict_of_neighbours ):
    '''simple docstring'''
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n )
        for kn in solution[1:-1]:
            idx2 = solution.index(kn )
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution )
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k ) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1] )
            _tmp.append(distance )
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp )
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x : x[index_of_last_item_in_the_list] )
    return neighborhood_of_solution
def tabu_search( first_solution , distance_of_first_solution , dict_of_neighbours , iters , size ):
    '''simple docstring'''
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution , dict_of_neighbours )
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution ) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution ):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node] )
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list ) >= size:
            tabu_list.pop(0 )
        count = count + 1
    return best_solution_ever, best_cost
def main( args=None ):
    '''simple docstring'''
    dict_of_neighbours = generate_neighbours(args.File )
    first_solution , distance_of_first_solution = generate_first_solution(
        args.File , dict_of_neighbours )
    best_sol , best_cost = tabu_search(
        first_solution , distance_of_first_solution , dict_of_neighbours , args.Iterations , args.Size , )
    print(F"Best solution: {best_sol}, with total distance: {best_cost}." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
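# Expected input file format (added for illustration; inferred from
# generate_neighbours above, which reads "node node distance" triples per
# line):
#
#   a b 20
#   a c 18
#   b c 10
#
# generate_neighbours maps each node to [[neighbour, distance], ...] and
# generate_first_solution greedily builds the initial tour starting from the
# first character of the file.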
import math
def is_prime( number : int ) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution( nth : int = 10001 ) -> int:
    '''simple docstring'''
    try:
        nth = int(nth )
    except (TypeError, ValueError):
        raise TypeError("""Parameter nth must be int or castable to int.""" ) from None
    if nth <= 0:
        raise ValueError("""Parameter nth must be greater than or equal to one.""" )
    primes = []
    num = 2
    while len(primes ) < nth:
        if is_prime(num ):
            primes.append(num )
        num += 1
    return primes[len(primes ) - 1]
if __name__ == "__main__":
print(f"""{solution() = }""")
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian( string_aa ):
    '''simple docstring'''
    if len(string_aa ) != 32:
        raise ValueError("Input must be of length 32" )
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex( i ):
    '''simple docstring'''
    if i < 0:
        raise ValueError("Input must be non-negative" )
    hex_rep = format(i , "08x" )[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" )
    return little_endian_hex


def preprocess( message ):
    '''simple docstring'''
    bit_string = b""
    for char in message:
        bit_string += format(char , "08b" ).encode("utf-8" )
    start_len = format(len(bit_string ) , "064b" ).encode("utf-8" )
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string ) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
    return bit_string


def get_block_words( bit_string ):
    '''simple docstring'''
    if len(bit_string ) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512" )
    for pos in range(0 , len(bit_string ) , 512 ):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0 , 512 , 32 ):
            block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
        yield block_words


def not_aa( i ):
    '''simple docstring'''
    if i < 0:
        raise ValueError("Input must be non-negative" )
    i_str = format(i , "032b" )
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str , 2 )


def sum_aa( a , b ):
    '''simple docstring'''
    return (a + b) % 2**32


def left_rotate_aa( i , shift ):
    '''simple docstring'''
    if i < 0:
        raise ValueError("Input must be non-negative" )
    if shift < 0:
        raise ValueError("Shift must be non-negative" )
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me( message ):
    '''simple docstring'''
    bit_string = preprocess(message )
    added_consts = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
    # Starting states
    aa = 0X67452301
    ba = 0XEFCDAB89
    ca = 0X98BADCFE
    da = 0X10325476
    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string ):
        a = aa
        b = ba
        c = ca
        d = da
        # Hash current chunk
        for i in range(64 ):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d ))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b , left_rotate_aa(f , shift_amounts[i] ) )
        # Add hashed chunk to running total
        aa = sum_aa(aa , a )
        ba = sum_aa(ba , b )
        ca = sum_aa(ca , c )
        da = sum_aa(da , d )
    digest = reformat_hex(aa ) + reformat_hex(ba ) + reformat_hex(ca ) + reformat_hex(da )
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
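# Known-answer checks (added for illustration; these are the standard RFC 1321
# test vectors for MD5):
#   md5_me(b"")    == b"d41d8cd98f00b204e9800998ecf8427e"
#   md5_me(b"abc") == b"900150983cd24fb0d6963f7d28e17f72"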
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    '''simple docstring'''
    parser = ArgumentParser('''Transformers CLI tool''' , usage='''transformers-cli <command> [<args>]''' )
    commands_parser = parser.add_subparsers(help='''transformers-cli command helpers''' )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
'''simple docstring'''
a : List[str] = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
lowercase_ = "src/transformers"
lowercase_ = "docs/source/en/tasks"
def _find_text_in_file( filename , start_prompt , end_prompt ):
    with open(filename , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1

    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
"asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
"summarization.md": ("nllb",),
"translation.md": ("nllb",),
}
def get_model_list_for_task( task_guide ):
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide , set() )
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f'''[{name}](../model_doc/{code})''' for code, name in model_names.items()] ) + "\n"
def lowercase ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[Any]=False ) -> Union[str, Any]:
__a = _find_text_in_file(
filename=os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) , start_prompt='''<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->''' , end_prompt='''<!--End of the generated tip-->''' , )
__a = get_model_list_for_task(lowerCAmelCase__ )
if current_list != new_list:
if overwrite:
with open(os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
f'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'''
''' to fix this.''' )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowercase_ = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
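# Typical invocations from the repo root:
#   python utils/check_task_guides.py                      # check only, raises on any mismatch
#   python utils/check_task_guides.py --fix_and_overwrite  # rewrite the task guides in place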
"""Convert YOLOS checkpoints from the original repository to the 🤗 Transformers format."""
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
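# For illustration, two timm-style keys and the names the replacements above produce:
#   "backbone.cls_token"             -> "vit.embeddings.cls_token"
#   "backbone.blocks.0.norm1.weight" -> "vit.encoder.layer.0.layernorm_before.weight"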
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def prepare_img() -> Image.Image:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_yolos_checkpoint(
    yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
):
    """Copy/paste/tweak model's weights to our YOLOS structure."""
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--yolos_name",
        default="yolos_s_200_pre",
        type=str,
        help=(
            "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
            " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
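# Example conversion run (the checkpoint path is a placeholder for a file downloaded
# from the original YOLOS repository):
#   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#       --checkpoint_path /path/to/yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path ./yolos-small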
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class _A ( lowercase__ ):
def A__ ( self ):
"""simple docstring"""
lowercase = tempfile.mkdtemp()
lowercase = 8
# DPR tok
lowercase = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowercase = os.path.join(self.tmpdirname , """dpr_tokenizer""" )
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
lowercase = os.path.join(__lowerCAmelCase , DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
lowercase = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
lowercase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
lowercase = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowercase = {"unk_token": "<unk>"}
lowercase = os.path.join(self.tmpdirname , """bart_tokenizer""" )
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
lowercase = os.path.join(__lowerCAmelCase , BART_VOCAB_FILES_NAMES["""vocab_file"""] )
lowercase = os.path.join(__lowerCAmelCase , BART_VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__lowerCAmelCase ) )
def A__ ( self ):
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
def A__ ( self ):
"""simple docstring"""
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
def A__ ( self ):
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) )
def A__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def A__ ( self ):
"""simple docstring"""
lowercase = Dataset.from_dict(
{
"""id""": ["""0""", """1"""],
"""text""": ["""foo""", """bar"""],
"""title""": ["""Foo""", """Bar"""],
"""embeddings""": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("""embeddings""" , string_factory="""Flat""" , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_dummy_dataset()
lowercase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset:
lowercase = dataset
lowercase = RagRetriever(
__lowerCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = self.get_dummy_dataset()
lowercase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="""custom""" , )
if from_disk:
lowercase = os.path.join(self.tmpdirname , """dataset""" )
lowercase = os.path.join(self.tmpdirname , """index.faiss""" )
dataset.get_index("""embeddings""" ).save(os.path.join(self.tmpdirname , """index.faiss""" ) )
dataset.drop_index("""embeddings""" )
dataset.save_to_disk(os.path.join(self.tmpdirname , """dataset""" ) )
del dataset
lowercase = RagRetriever(
__lowerCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
lowercase = RagRetriever(
__lowerCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __lowerCAmelCase ) , )
return retriever
def A__ ( self ):
"""simple docstring"""
lowercase = Dataset.from_dict(
{
"""id""": ["""0""", """1"""],
"""text""": ["""foo""", """bar"""],
"""title""": ["""Foo""", """Bar"""],
"""embeddings""": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("""embeddings""" , string_factory="""Flat""" , metric_type=faiss.METRIC_INNER_PRODUCT )
lowercase = os.path.join(self.tmpdirname , """hf_bert_base.hnswSQ8_correct_phi_128.c_index""" )
dataset.save_faiss_index("""embeddings""" , index_file_name + """.index.dpr""" )
pickle.dump(dataset["""id"""] , open(index_file_name + """.index_meta.dpr""" , """wb""" ) )
lowercase = os.path.join(self.tmpdirname , """psgs_w100.tsv.pkl""" )
lowercase = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
pickle.dump(__lowerCAmelCase , open(__lowerCAmelCase , """wb""" ) )
lowercase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="""legacy""" , index_path=self.tmpdirname , )
lowercase = RagRetriever(
__lowerCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def A__ ( self ):
"""simple docstring"""
lowercase = 1
lowercase = self.get_dummy_canonical_hf_index_retriever()
lowercase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase = retriever.retrieve(__lowerCAmelCase , n_docs=__lowerCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__lowerCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""id"""] ) , __lowerCAmelCase )
self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset:
lowercase = self.get_dummy_dataset()
retriever.save_pretrained(__lowerCAmelCase )
lowercase = RagRetriever.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
lowercase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase = retriever.retrieve(__lowerCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def A__ ( self ):
"""simple docstring"""
lowercase = 1
lowercase = self.get_dummy_custom_hf_index_retriever(from_disk=__lowerCAmelCase )
lowercase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase = retriever.retrieve(__lowerCAmelCase , n_docs=__lowerCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__lowerCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""id"""] ) , __lowerCAmelCase )
self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_dummy_custom_hf_index_retriever(from_disk=__lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__lowerCAmelCase )
lowercase = RagRetriever.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
lowercase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase = retriever.retrieve(__lowerCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def A__ ( self ):
"""simple docstring"""
lowercase = 1
lowercase = self.get_dummy_custom_hf_index_retriever(from_disk=__lowerCAmelCase )
lowercase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase = retriever.retrieve(__lowerCAmelCase , n_docs=__lowerCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__lowerCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""id"""] ) , __lowerCAmelCase )
self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_dummy_custom_hf_index_retriever(from_disk=__lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__lowerCAmelCase )
lowercase = RagRetriever.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
lowercase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase = retriever.retrieve(__lowerCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def A__ ( self ):
"""simple docstring"""
lowercase = 1
lowercase = self.get_dummy_legacy_index_retriever()
lowercase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase = retriever.retrieve(__lowerCAmelCase , n_docs=__lowerCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__lowerCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["""text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""text"""] ) , __lowerCAmelCase )
self.assertEqual(doc_dicts[0]["""text"""][0] , """bar""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""text"""][0] , """foo""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__lowerCAmelCase )
lowercase = RagRetriever.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
lowercase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase = retriever.retrieve(__lowerCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def A__ ( self ):
"""simple docstring"""
import torch
lowercase = 1
lowercase = self.get_dummy_canonical_hf_index_retriever()
lowercase = [[5, 7], [10, 11]]
lowercase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase = retriever(__lowerCAmelCase , __lowerCAmelCase , prefix=retriever.config.generator.prefix , n_docs=__lowerCAmelCase )
lowercase = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , np.ndarray )
lowercase = retriever(
__lowerCAmelCase , __lowerCAmelCase , prefix=retriever.config.generator.prefix , n_docs=__lowerCAmelCase , return_tensors="""pt""" , )
lowercase = ( # noqa: F841
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
out["doc_ids"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_dpr_ctx_encoder_tokenizer()
lowercase = 1
lowercase = self.get_dummy_custom_hf_index_retriever(from_disk=__lowerCAmelCase )
retriever.set_ctx_encoder_tokenizer(__lowerCAmelCase )
lowercase = [[5, 7], [10, 11]]
lowercase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowercase = retriever(__lowerCAmelCase , __lowerCAmelCase , prefix=retriever.config.generator.prefix , n_docs=__lowerCAmelCase )
self.assertEqual(
len(__lowerCAmelCase ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ("""tokenized_doc_ids""", """tokenized_doc_attention_mask""") ) , __lowerCAmelCase ) # check for doc token related keys in dictionary.
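# To run just this module (assuming the standard transformers test layout and an
# environment with faiss installed):
#   pytest tests/models/rag/test_retrieval_rag.py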
"""Preprocessing script before distillation: tokenize the corpus once and dump the token ids to disk."""
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter_ = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter_ += 1
        if iter_ % interval == 0:
            end = time.time()
            logger.info(f"{iter_} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
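# A minimal sketch of reading a dump back (the file name assumes the default
# `--dump_file` and `--tokenizer_name` values above):
#
#   import pickle
#   with open("data/dump.bert-base-uncased.pickle", "rb") as fp:
#       sequences = pickle.load(fp)  # list of numpy integer arrays, one per input line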
"""Fast tokenization class for SqueezeBERT."""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}


class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" SqueezeBERT tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs by adding [CLS] and [SEP] around a sequence or a pair of sequences."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create token type IDs: 0s for the first sequence (and its special tokens), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
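# A minimal usage sketch (downloads the pretrained vocab on first use):
#
#   tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#   enc = tokenizer("first segment", "second segment")
#   # token_type_ids are 0 for `[CLS] first segment [SEP]` and 1 for `second segment [SEP]`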
"""Tests for the in-graph TensorFlow GPT-2 tokenizer."""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer


TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"

if is_tf_available():

    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])

            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs


@require_tf
@require_keras_nlp
class GPTTokenizationTest(unittest.TestCase):
    # The TF tokenizers are usually going to be used as pretrained tokenizers from existing model checkpoints,
    # so that's what we focus on here.

    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # pin the pad token id so padding is defined, for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
"""REALM model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/realm-cc-news-pretrained-embedder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-encoder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-scorer": (
        "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-openqa": (
        "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json"
    ),
    "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
    "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
    "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
    "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
    # See all REALM models at https://huggingface.co/models?filter=realm
}


class RealmConfig(PretrainedConfig):
    r"""
    Configuration class for the REALM models (embedder, scorer, knowledge-augmented encoder, and reader).
    Instantiating a configuration with the defaults will yield a similar configuration to that of the
    google/realm-cc-news-pretrained-embedder architecture.
    """

    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
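# A minimal usage sketch: instantiate with the defaults or override individual fields.
#
#   config = RealmConfig(num_candidates=4, reader_beam_size=3)
#   assert config.model_type == "realm"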
"""Utility that cleans the model section of the doc table of content (deduplicate + sort)."""
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Clean the model documentation table of content by removing duplicates and sorting models alphabetically."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1

    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
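# For illustration, `clean_model_doc_toc` deduplicates entries and sorts them by title:
#
#   toc = [
#       {"local": "model_doc/bert", "title": "BERT"},
#       {"local": "model_doc/albert", "title": "ALBERT"},
#       {"local": "model_doc/bert", "title": "BERT"},
#   ]
#   clean_model_doc_toc(toc)
#   # -> [{"local": "model_doc/albert", "title": "ALBERT"}, {"local": "model_doc/bert", "title": "BERT"}]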
import inspect
import os
import sys
import unittest

import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu


class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
"""Small torch helpers: freeze a module, pick a device, display a PIL image, get a timestamp."""
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_module(module):
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil(img):
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
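# A minimal usage sketch (assumes `model` is any torch.nn.Module):
#
#   device = get_device()
#   freeze_module(model)  # turn off gradients for every parameter
#   assert all(not p.requires_grad for p in model.parameters())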
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    """Solve the "rat in a maze" problem: print a path from (0, 0) to (size - 1, size - 1) if one exists."""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Recursively extend the path at cell (i, j), backtracking when a direction leads nowhere."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
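# Example: a 0 marks a free cell, a 1 marks a wall. This maze is solvable along
# the top row and then down the right column:
#
#   maze = [
#       [0, 0, 0],
#       [1, 1, 0],
#       [1, 1, 0],
#   ]
#   solve_maze(maze)  # prints the visited path as a 0/1 grid and returns True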
"""Distributed seq2seq evaluation: each GPU generates on a shard of the dataset, rank 0 aggregates."""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List

import torch
from torch.utils.data import DataLoader
from tqdm import tqdm

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
    calculate_bleu,
    calculate_rouge,
    chunks,
    lmap,
    load_json,
    parse_numeric_n_bool_cl_kwargs,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


logger = getLogger(__name__)


def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
) -> Dict:
    """Run evaluation on part of the dataset on one gpu, and log results to save_dir."""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas


def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name",
        type=str,
        help="like facebook/bart-large-cnn,t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout",
        type=int,
        default=600,
        required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics: Dict = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
        else:
            shutil.rmtree(json_save_dir)


def combine_partial_results(partial_results) -> List:
    """Concatenate partial results into one list, then sort it by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds


def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    """Wait until every rank has written its rank_*.json file, then load them all."""
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
    # Unreachable


if __name__ == "__main__":
    # Usage for MT: see the example launch command below.
    run_generate()
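# Example multi-GPU launch with the legacy launcher (paths and sizes are placeholders):
#   python -m torch.distributed.launch --nproc_per_node=8 run_distributed_eval.py \
#       --model_name sshleifer/distilbart-xsum-12-3 --data_dir xsum --save_dir dbart_xsum_generations --bs 16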
from torch import nn


def get_activation(act_fn: str) -> nn.Module:
    """Map an activation name to the corresponding torch module."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
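# A minimal usage sketch:
#
#   import torch
#   act = get_activation("silu")  # -> nn.SiLU()
#   y = act(torch.randn(2, 3))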
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    """Translate an audiocraft state-dict key into the transformers naming scheme."""
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
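# Quick sanity sketch for the renaming above (made-up keys, no model needed):
assert rename_keys("transformer.layers.0.linear1.weight") == "model.decoder.layers.0.fc1.weight"
assert rename_keys("emb.0.weight") == "model.decoder.embed_tokens.0.weight"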
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Rename the fairseq keys and partition the state dict into the decoder (LM)
    state dict and the encoder-decoder projection state dict."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
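# Sanity sketch: the mapping above just hard-codes the three published sizes,
# with ffn_dim fixed at 4 * hidden_size.
_small_cfg = decoder_config_from_checkpoint("small")
assert (_small_cfg.hidden_size, _small_cfg.num_hidden_layers, _small_cfg.num_attention_heads) == (1024, 24, 16)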
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module) -> None:
    """Disable gradient updates for every parameter of the given module."""
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil(img) -> None:
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
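# A small usage sketch for the helpers above (safe to run on any machine):
if __name__ == "__main__":
    from torch import nn

    _layer = nn.Linear(4, 4)
    freeze_module(_layer)
    print(get_timestamp(), get_device(), all(not p.requires_grad for p in _layer.parameters()))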
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}

SPIECE_UNDERLINE = "▁"
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_small_model_tf(self):
        pass
    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
] , )
        outputs = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.64 , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
]
] , )
    @require_torch
    @slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
] , )
        outputs = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
] , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
] , )
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_large_model_tf(self):
        pass
    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
] , )
    @require_torch
    @slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
] , )
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
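# A standalone sketch of composing criteria outside the test harness: the list
# reports "stop" as soon as any member criterion is satisfied (in this version
# of the library the call returns a plain bool, as the tests above rely on).
if __name__ == "__main__":
    _criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=4)])
    _ids = torch.zeros((1, 4), dtype=torch.long)
    _scores = torch.zeros((1, 4))
    print(_criteria(_ids, _scores))  # True: the sequence has reached max_length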
'''simple docstring'''
def twos_complement(number: int) -> str:
    """Return the two's complement representation of a non-positive integer."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
    "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
        "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AdaptiveEmbedding",
        "TransfoXLForSequenceClassification",
        "TransfoXLLMHeadModel",
        "TransfoXLModel",
        "TransfoXLPreTrainedModel",
        "load_tf_weights_in_transfo_xl",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
        "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAdaptiveEmbedding",
        "TFTransfoXLForSequenceClassification",
        "TFTransfoXLLMHeadModel",
        "TFTransfoXLMainLayer",
        "TFTransfoXLModel",
        "TFTransfoXLPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
iris = datasets.load_iris()

X = np.array(iris["data"])
y = np.array(iris["target"])
classes = iris["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """Give the Euclidean distance between points a and b."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classify `point` by majority vote among its k nearest training points."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
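# Quick check of the distance helper (3-4-5 right triangle):
assert euclidean_distance([0, 0], [3, 4]) == 5.0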
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
"""simple docstring"""
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2 ** power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
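# Worked example: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
assert solution(15) == 26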
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
'''simple docstring'''
def is_power_of_two(number: int) -> bool:
    """Return True if `number` is a power of two, using the n & (n - 1) trick
    (zero also passes, since 0 & -1 == 0)."""
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
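# Sanity checks: powers of two (and zero) pass, other values fail.
assert is_power_of_two(1) and is_power_of_two(16) and is_power_of_two(0)
assert not is_power_of_two(6)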
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class _A ( unittest.TestCase ):
def A__ ( self ):
"""simple docstring"""
lowercase = {
"task_specific_params": {
"summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
"summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
"summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
}
}
lowercase = {
"task_specific_params.summarization.length_penalty": 1.0,
"task_specific_params.summarization.max_length": 128,
"task_specific_params.summarization.min_length": 12,
"task_specific_params.summarization.num_beams": 4,
"task_specific_params.summarization_cnn.length_penalty": 2.0,
"task_specific_params.summarization_cnn.max_length": 142,
"task_specific_params.summarization_cnn.min_length": 56,
"task_specific_params.summarization_cnn.num_beams": 4,
"task_specific_params.summarization_xsum.length_penalty": 1.0,
"task_specific_params.summarization_xsum.max_length": 62,
"task_specific_params.summarization_xsum.min_length": 11,
"task_specific_params.summarization_xsum.num_beams": 6,
}
self.assertEqual(flatten_dict(__lowerCAmelCase ) , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , x.transpose() ) )
lowercase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def A__ ( self ):
"""simple docstring"""
lowercase = np.random.randn(3 , 4 )
lowercase = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , transpose(__lowerCAmelCase ).numpy() ) )
lowercase = np.random.randn(3 , 4 , 5 )
lowercase = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , transpose(__lowerCAmelCase , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def A__ ( self ):
"""simple docstring"""
lowercase = np.random.randn(3 , 4 )
lowercase = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , transpose(__lowerCAmelCase ).numpy() ) )
lowercase = np.random.randn(3 , 4 , 5 )
lowercase = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , transpose(__lowerCAmelCase , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def A__ ( self ):
"""simple docstring"""
lowercase = np.random.randn(3 , 4 )
lowercase = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , np.asarray(transpose(__lowerCAmelCase ) ) ) )
lowercase = np.random.randn(3 , 4 , 5 )
lowercase = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , np.asarray(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) ) ) )
def A__ ( self ):
"""simple docstring"""
lowercase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , np.reshape(__lowerCAmelCase , (4, 3) ) ) )
lowercase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (12, 5) ) , np.reshape(__lowerCAmelCase , (12, 5) ) ) )
@require_torch
def A__ ( self ):
"""simple docstring"""
lowercase = np.random.randn(3 , 4 )
lowercase = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , reshape(__lowerCAmelCase , (4, 3) ).numpy() ) )
lowercase = np.random.randn(3 , 4 , 5 )
lowercase = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (12, 5) ) , reshape(__lowerCAmelCase , (12, 5) ).numpy() ) )
@require_tf
def A__ ( self ):
"""simple docstring"""
lowercase = np.random.randn(3 , 4 )
lowercase = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , reshape(__lowerCAmelCase , (4, 3) ).numpy() ) )
lowercase = np.random.randn(3 , 4 , 5 )
lowercase = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (12, 5) ) , reshape(__lowerCAmelCase , (12, 5) ).numpy() ) )
@require_flax
def A__ ( self ):
"""simple docstring"""
lowercase = np.random.randn(3 , 4 )
lowercase = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , np.asarray(reshape(__lowerCAmelCase , (4, 3) ) ) ) )
lowercase = np.random.randn(3 , 4 , 5 )
lowercase = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (12, 5) ) , np.asarray(reshape(__lowerCAmelCase , (12, 5) ) ) ) )
def A__ ( self ):
"""simple docstring"""
lowercase = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , np.squeeze(__lowerCAmelCase ) ) )
lowercase = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , np.squeeze(__lowerCAmelCase , axis=2 ) ) )
@require_torch
def A__ ( self ):
"""simple docstring"""
lowercase = np.random.randn(1 , 3 , 4 )
lowercase = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , squeeze(__lowerCAmelCase ).numpy() ) )
lowercase = np.random.randn(1 , 4 , 1 , 5 )
lowercase = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , squeeze(__lowerCAmelCase , axis=2 ).numpy() ) )
@require_tf
def A__ ( self ):
"""simple docstring"""
lowercase = np.random.randn(1 , 3 , 4 )
lowercase = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , squeeze(__lowerCAmelCase ).numpy() ) )
lowercase = np.random.randn(1 , 4 , 1 , 5 )
lowercase = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , squeeze(__lowerCAmelCase , axis=2 ).numpy() ) )
@require_flax
def A__ ( self ):
"""simple docstring"""
lowercase = np.random.randn(1 , 3 , 4 )
lowercase = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , np.asarray(squeeze(__lowerCAmelCase ) ) ) )
lowercase = np.random.randn(1 , 4 , 1 , 5 )
lowercase = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , np.asarray(squeeze(__lowerCAmelCase , axis=2 ) ) ) )
def A__ ( self ):
"""simple docstring"""
lowercase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , np.expand_dims(__lowerCAmelCase , axis=1 ) ) )
@require_torch
def A__ ( self ):
"""simple docstring"""
lowercase = np.random.randn(3 , 4 )
lowercase = torch.tensor(__lowerCAmelCase )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , expand_dims(__lowerCAmelCase , axis=1 ).numpy() ) )
@require_tf
def A__ ( self ):
"""simple docstring"""
lowercase = np.random.randn(3 , 4 )
lowercase = tf.constant(__lowerCAmelCase )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , expand_dims(__lowerCAmelCase , axis=1 ).numpy() ) )
@require_flax
def A__ ( self ):
"""simple docstring"""
lowercase = np.random.randn(3 , 4 )
lowercase = jnp.array(__lowerCAmelCase )
self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , np.asarray(expand_dims(__lowerCAmelCase , axis=1 ) ) ) )
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    """Pickable version of a LambdaLR schedule function."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _A :
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict=13 , __SCREAMING_SNAKE_CASE : Optional[Any]=30 , __SCREAMING_SNAKE_CASE : Tuple=2 , __SCREAMING_SNAKE_CASE : int=3 , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : int=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=4 , __SCREAMING_SNAKE_CASE : Dict=37 , __SCREAMING_SNAKE_CASE : Dict="gelu" , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=10 , __SCREAMING_SNAKE_CASE : Any=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=3 , __SCREAMING_SNAKE_CASE : List[Any]=None , ):
'''simple docstring'''
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = is_training
__a = use_labels
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = type_sequence_label_size
__a = initializer_range
__a = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__a = (image_size // patch_size) ** 2
__a = num_patches + 1
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__a = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any]):
'''simple docstring'''
__a = TFViTModel(config=__SCREAMING_SNAKE_CASE)
__a = model(__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
# Test with an image with different size than the one specified in config.
__a = self.image_size // 2
__a = pixel_values[:, :, :image_size, :image_size]
__a = model(__SCREAMING_SNAKE_CASE , interpolate_pos_encoding=__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE)
__a = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size))
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
__a = self.type_sequence_label_size
__a = TFViTForImageClassification(__SCREAMING_SNAKE_CASE)
__a = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# Test with an image with different size than the one specified in config.
__a = self.image_size // 2
__a = pixel_values[:, :, :image_size, :image_size]
__a = model(__SCREAMING_SNAKE_CASE , interpolate_pos_encoding=__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
__a = 1
__a = TFViTForImageClassification(__SCREAMING_SNAKE_CASE)
__a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
__a = model(__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = self.prepare_config_and_inputs()
__a = config_and_inputs
__a = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class _A ( lowercase__ ,lowercase__ ,unittest.TestCase ):
UpperCamelCase__ : int = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
UpperCamelCase__ : Any = (
{"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
if is_tf_available()
else {}
)
UpperCamelCase__ : Optional[int] = False
UpperCamelCase__ : str = False
UpperCamelCase__ : Dict = False
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = TFViTModelTester(self)
__a = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=37)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''')
def _lowerCamelCase ( self : Any):
'''simple docstring'''
pass
@unittest.skip(reason='''ViT does not use inputs_embeds''')
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(__SCREAMING_SNAKE_CASE)
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer))
__a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , tf.keras.layers.Layer))
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(__SCREAMING_SNAKE_CASE)
__a = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE)
@slow
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = TFViTModel.from_pretrained('''google/vit-base-patch16-224''')
self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
def __snake_case ( ):
__a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class _A ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''') if is_vision_available() else None
@slow
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''')
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''tf''')
# forward pass
__a = model(**__SCREAMING_SNAKE_CASE)
# verify the logits
__a = tf.TensorShape((1, 1_000))
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE)
__a = tf.constant([-0.27_44, 0.82_15, -0.08_36])
tf.debugging.assert_near(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4)
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config

logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id):
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
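# A tiny sketch of the shift operation above (requires jax; -100 label padding
# is replaced by pad_token_id after the shift):
if __name__ == "__main__":
    _ids = jnp.array([[5, 6, 7], [8, -100, -100]])
    print(shift_tokens_right(_ids, pad_token_id=0, decoder_start_token_id=2))
    # [[2 5 6]
    #  [2 8 0]]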
def and_gate(input_1: int, input_2: int) -> int:
    """Calculate AND of the two input values (1 only when both inputs are 1)."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    """Tests the and_gate function."""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
'''simple docstring'''
from jiwer import compute_measures
import datasets
a : List[Any] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
a : str = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
a : Union[str, Any] = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
    """Word error rate metric, wrapping `jiwer.compute_measures`."""

    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] , )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
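# Usage sketch (illustrative, not part of the original metric module): the
# iterative branch of _compute above, reproduced directly with `jiwer`.
if __name__ == "__main__":
    example_predictions = ["this is the prediction", "there is an other sample"]
    example_references = ["this is the reference", "there is another one"]
    incorrect = 0
    total = 0
    for prediction, reference in zip(example_predictions, example_references):
        measures = compute_measures(reference, prediction)
        incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
        total += measures["substitutions"] + measures["deletions"] + measures["hits"]
    print(incorrect / total)  # 0.5 for these inputs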
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    """Zero-shot text classification tool built on an NLI model."""

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f"This example is {label}" for label in labels], return_tensors="pt", padding="max_length"
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
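# Usage sketch (hypothetical, assumes the agent tooling and model weights are
# available locally):
#   classifier = TextClassificationTool()
#   classifier.setup()
#   inputs = classifier.encode("This is a super nice API!", ["positive", "negative"])
#   outputs = classifier.model(**inputs)
#   classifier.decode(outputs)  # -> most likely label, e.g. "positive"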
"""Project Euler 47: find the first of four consecutive integers that each
have four distinct prime factors. https://projecteuler.net/problem=47"""
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Return the set of distinct prime factors of n (trial division)."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of distinct prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """True if every element of the iterable is equal (or it is empty)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find a run of n consecutive integers that each have n distinct prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int | None:
    """Return the first member of the run found by run(n), if any."""
    results = run(n)
    return results[0] if len(results) else None
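# Sanity check with known values: 644 = 2^2 * 7 * 23, 645 = 3 * 5 * 43 and
# 646 = 2 * 17 * 19 form the classic run of three consecutive integers with
# three distinct prime factors each, so solution(3) should return 644.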
if __name__ == "__main__":
print(solution())
from __future__ import annotations
from collections import deque
class Automaton:
    """Aho-Corasick automaton for multi-pattern string matching."""

    def __init__(self, keywords: list[str]) -> None:
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    result[key] = []
                result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
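# Usage sketch (illustrative): build an automaton over a few keywords and
# report every starting index at which each keyword occurs.
#   aho = Automaton(["what", "hat", "ver", "er"])
#   aho.search_in("whatever, err ... , wherever")
#   -> {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}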
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
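# The `_LazyModule` pattern above keeps importing the package cheap: the
# submodules named in `_import_structure` are only imported on first attribute
# access, while the TYPE_CHECKING branch gives static type checkers the real symbols.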
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set:
    """Iterative depth-first search; returns the set of explored vertices."""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
"A": ["B", "C", "D"],
"B": ["A", "D", "E"],
"C": ["A", "F"],
"D": ["B", "D"],
"E": ["B", "F"],
"F": ["C", "E", "G"],
"G": ["F"],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, 'A'))
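# For the graph above every vertex is reachable from "A", so the printed set is
# {"A", "B", "C", "D", "E", "F", "G"}; with neighbors pushed in reverse, the
# stack-based visit order works out to A, B, D, E, F, C, G.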
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")
_split_re = r"^\w+(\.\w+)*$"
INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
def camelcase_to_snakecase(name):
    """Convert a camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert a snake-case string to camel-case."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
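# Quick examples of the helpers above (illustrative):
#   camelcase_to_snakecase("SomeDatasetName")    -> "some_dataset_name"
#   snakecase_to_camelcase("some_dataset_name")  -> "SomeDatasetName"
#   filename_prefix_for_split("squad", "train")  -> "squad-train"
#   filenames_for_dataset_split("/data", "squad", "train", "arrow", shard_lengths=[100, 100])
#   -> ['/data/squad-train-00000-of-00002.arrow', '/data/squad-train-00001-of-00002.arrow']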
"""simple docstring"""
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )

    # Part 2 - Fitting the CNN to the images

    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')

    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )

    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )

    classifier.save("cnn.h5")

    # Part 3 - Making new predictions

    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
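# Note: comparing a sigmoid output to exactly 0 or 1 only holds when the model
# saturates; a more robust decision rule would be, e.g.:
#   prediction = "Abnormality detected" if result[0][0] > 0.5 else "Normal"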
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self :Dict ):
A = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
A = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
A = {
"unk_token": "<unk>",
"bos_token": "<s>",
"eos_token": "</s>",
}
A = {
"feature_size": 1,
"padding_value": 0.0,
"sampling_rate": 1_60_00,
"return_attention_mask": False,
"do_normalize": True,
}
A = tempfile.mkdtemp()
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
A = os.path.join(self.tmpdirname , __UpperCamelCase )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__UpperCamelCase ) + "\n" )
with open(self.feature_extraction_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__UpperCamelCase ) + "\n" )
# load decoder from hub
A = "hf-internal-testing/ngram-beam-search-decoder"
def lowerCamelCase ( self :List[str] , **__UpperCamelCase :Tuple ):
A = self.add_kwargs_tokens_map.copy()
kwargs.update(__UpperCamelCase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def lowerCamelCase ( self :List[Any] , **__UpperCamelCase :int ):
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def lowerCamelCase ( self :List[Any] , **__UpperCamelCase :List[str] ):
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__UpperCamelCase )
def lowerCamelCase ( self :List[Any] ):
shutil.rmtree(self.tmpdirname )
def lowerCamelCase ( self :Optional[Any] ):
A = self.get_tokenizer()
A = self.get_feature_extractor()
A = self.get_decoder()
A = WavaVecaProcessorWithLM(tokenizer=__UpperCamelCase , feature_extractor=__UpperCamelCase , decoder=__UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
A = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCamelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __UpperCamelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __UpperCamelCase )
def lowerCamelCase ( self :Optional[Any] ):
A = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
A = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowerCamelCase ( self :Dict ):
A = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["xx"] )
with self.assertRaisesRegex(__UpperCamelCase , "include" ):
WavaVecaProcessorWithLM(
tokenizer=__UpperCamelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowerCamelCase ( self :Tuple ):
A = self.get_feature_extractor()
A = self.get_tokenizer()
A = self.get_decoder()
A = WavaVecaProcessorWithLM(tokenizer=__UpperCamelCase , feature_extractor=__UpperCamelCase , decoder=__UpperCamelCase )
A = floats_list((3, 10_00) )
A = feature_extractor(__UpperCamelCase , return_tensors="np" )
A = processor(__UpperCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCamelCase ( self :Tuple ):
A = self.get_feature_extractor()
A = self.get_tokenizer()
A = self.get_decoder()
A = WavaVecaProcessorWithLM(tokenizer=__UpperCamelCase , feature_extractor=__UpperCamelCase , decoder=__UpperCamelCase )
A = "This is a test string"
A = processor(text=__UpperCamelCase )
A = tokenizer(__UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :int=(2, 10, 16) , __UpperCamelCase :Optional[Any]=77 ):
np.random.seed(__UpperCamelCase )
return np.random.rand(*__UpperCamelCase )
def lowerCamelCase ( self :List[str] ):
A = self.get_feature_extractor()
A = self.get_tokenizer()
A = self.get_decoder()
A = WavaVecaProcessorWithLM(tokenizer=__UpperCamelCase , feature_extractor=__UpperCamelCase , decoder=__UpperCamelCase )
A = self._get_dummy_logits(shape=(10, 16) , seed=13 )
A = processor.decode(__UpperCamelCase )
A = decoder.decode_beams(__UpperCamelCase )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("</s> <s> </s>" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["fork"], ["spawn"]] )
def lowerCamelCase ( self :Any , __UpperCamelCase :Optional[int] ):
A = self.get_feature_extractor()
A = self.get_tokenizer()
A = self.get_decoder()
A = WavaVecaProcessorWithLM(tokenizer=__UpperCamelCase , feature_extractor=__UpperCamelCase , decoder=__UpperCamelCase )
A = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
A = processor.batch_decode(__UpperCamelCase )
else:
with get_context(__UpperCamelCase ).Pool() as pool:
A = processor.batch_decode(__UpperCamelCase , __UpperCamelCase )
A = list(__UpperCamelCase )
with get_context("fork" ).Pool() as p:
A = decoder.decode_beams_batch(__UpperCamelCase , __UpperCamelCase )
A = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__UpperCamelCase , decoded_processor.text )
self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] , decoded_processor.text )
self.assertListEqual(__UpperCamelCase , decoded_processor.logit_score )
self.assertListEqual(__UpperCamelCase , decoded_processor.lm_score )
def lowerCamelCase ( self :Dict ):
A = self.get_feature_extractor()
A = self.get_tokenizer()
A = self.get_decoder()
A = WavaVecaProcessorWithLM(tokenizer=__UpperCamelCase , feature_extractor=__UpperCamelCase , decoder=__UpperCamelCase )
A = self._get_dummy_logits()
A = 15
A = -20.0
A = -4.0
A = processor.batch_decode(
__UpperCamelCase , beam_width=__UpperCamelCase , beam_prune_logp=__UpperCamelCase , token_min_logp=__UpperCamelCase , )
A = decoded_processor_out.text
A = list(__UpperCamelCase )
with get_context("fork" ).Pool() as pool:
A = decoder.decode_beams_batch(
__UpperCamelCase , __UpperCamelCase , beam_width=__UpperCamelCase , beam_prune_logp=__UpperCamelCase , token_min_logp=__UpperCamelCase , )
A = [d[0][0] for d in decoded_decoder_out]
A = [d[0][2] for d in decoded_decoder_out]
A = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"] , __UpperCamelCase )
self.assertTrue(np.array_equal(__UpperCamelCase , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __UpperCamelCase , atol=1e-3 ) )
self.assertTrue(np.array_equal(__UpperCamelCase , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9_474] , __UpperCamelCase , atol=1e-3 ) )
def lowerCamelCase ( self :int ):
A = self.get_feature_extractor()
A = self.get_tokenizer()
A = self.get_decoder()
A = WavaVecaProcessorWithLM(tokenizer=__UpperCamelCase , feature_extractor=__UpperCamelCase , decoder=__UpperCamelCase )
A = self._get_dummy_logits()
A = 2.0
A = 5.0
A = -20.0
A = True
A = processor.batch_decode(
__UpperCamelCase , alpha=__UpperCamelCase , beta=__UpperCamelCase , unk_score_offset=__UpperCamelCase , lm_score_boundary=__UpperCamelCase , )
A = decoded_processor_out.text
A = list(__UpperCamelCase )
decoder.reset_params(
alpha=__UpperCamelCase , beta=__UpperCamelCase , unk_score_offset=__UpperCamelCase , lm_score_boundary=__UpperCamelCase , )
with get_context("fork" ).Pool() as pool:
A = decoder.decode_beams_batch(
__UpperCamelCase , __UpperCamelCase , )
A = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"] , __UpperCamelCase )
A = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __UpperCamelCase )
def lowerCamelCase ( self :List[str] ):
A = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
A = processor.decoder.model_container[processor.decoder._model_key]
A = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
A = os.listdir(__UpperCamelCase )
A = ["alphabet.json", "language_model"]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def lowerCamelCase ( self :Dict ):
A = snapshot_download("hf-internal-testing/processor_with_lm" )
A = WavaVecaProcessorWithLM.from_pretrained(__UpperCamelCase )
A = processor.decoder.model_container[processor.decoder._model_key]
A = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
A = os.listdir(__UpperCamelCase )
A = os.listdir(__UpperCamelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def lowerCamelCase ( self :Union[str, Any] ):
A = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
A = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm" )
A = floats_list((3, 10_00) )
A = processor_wavaveca(__UpperCamelCase , return_tensors="np" )
A = processor_auto(__UpperCamelCase , return_tensors="np" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
A = self._get_dummy_logits()
A = processor_wavaveca.batch_decode(__UpperCamelCase )
A = processor_auto.batch_decode(__UpperCamelCase )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowerCamelCase ( self :Tuple ):
A = self.get_feature_extractor()
A = self.get_tokenizer()
A = self.get_decoder()
A = WavaVecaProcessorWithLM(tokenizer=__UpperCamelCase , feature_extractor=__UpperCamelCase , decoder=__UpperCamelCase )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
@staticmethod
def lowerCamelCase ( __UpperCamelCase :str , __UpperCamelCase :Optional[int] ):
A = [d[key] for d in offsets]
return retrieved_list
def lowerCamelCase ( self :List[Any] ):
A = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
A = self._get_dummy_logits()[0]
A = processor.decode(__UpperCamelCase , output_word_offsets=__UpperCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"] , "word" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "end_offset" ) , [1, 3, 5] )
def lowerCamelCase ( self :List[Any] ):
A = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
A = self._get_dummy_logits()
A = processor.batch_decode(__UpperCamelCase , output_word_offsets=__UpperCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(__UpperCamelCase , __UpperCamelCase ) )
self.assertListEqual(
[" ".join(self.get_from_offsets(__UpperCamelCase , "word" ) ) for o in outputs["word_offsets"]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "end_offset" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowerCamelCase ( self :List[str] ):
import torch
A = load_dataset("common_voice" , "en" , split="train" , streaming=__UpperCamelCase )
A = ds.cast_column("audio" , datasets.Audio(sampling_rate=1_60_00 ) )
A = iter(__UpperCamelCase )
A = next(__UpperCamelCase )
A = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
A = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
A = processor(sample["audio"]["array"] , return_tensors="pt" ).input_values
with torch.no_grad():
A = model(__UpperCamelCase ).logits.cpu().numpy()
A = processor.decode(logits[0] , output_word_offsets=__UpperCamelCase )
A = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
A = [
{
"start_time": d["start_offset"] * time_offset,
"end_time": d["end_offset"] * time_offset,
"word": d["word"],
}
for d in output["word_offsets"]
]
A = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"
# output words
self.assertEqual(" ".join(self.get_from_offsets(__UpperCamelCase , "word" ) ) , __UpperCamelCase )
self.assertEqual(" ".join(self.get_from_offsets(__UpperCamelCase , "word" ) ) , output.text )
# output times
A = torch.tensor(self.get_from_offsets(__UpperCamelCase , "start_time" ) )
A = torch.tensor(self.get_from_offsets(__UpperCamelCase , "end_time" ) )
# fmt: off
A = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] )
A = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=0.01 ) )
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=0.01 ) )
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path):
    """Parse an edge-list file into a dict mapping each node to its [neighbor, distance] pairs."""
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    """Greedy nearest-neighbour tour starting (and ending) at the file's first node."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    """Generate all 2-swap neighbours of a solution, each with its total distance appended, sorted by distance."""
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0

            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
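# Expected input file format (one weighted edge per line: "node neighbor distance"), e.g.:
#   a b 20
#   a c 18
#   b c 10
# Hypothetical invocation: python tabu_search.py -f tabudata.txt -i 4 -s 3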
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Remove duplicate entries from the model doc table of content and sort models alphabetically."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    # Clean each modality's section in turn
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-char bit string from big endian to little endian."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """Return the little-endian hex representation of a non-negative integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """Pad the message to a multiple of 512 bits and append its original length."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list, None, None]:
    """Split the bit string into 512-bit blocks of 16 little-endian 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_32(i: int) -> int:
    """Bitwise NOT on a 32-bit integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32
def left_rotate_32(i: int, shift: int) -> int:
    """Rotate the bits of a 32-bit integer left by `shift` places."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Return the 32-char little-endian hex MD5 digest of a byte string."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
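# Known test vectors (RFC 1321) that md5_me should reproduce:
#   md5_me(b"") == b"d41d8cd98f00b204e9800998ecf8427e"
#   md5_me(b"The quick brown fox jumps over the lazy dog")
#       == b"9e107d9d372bb6826bd81d3542a419d6"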
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}

if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
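# Hypothetical invocation: python stable_diffusion_ipex.py --dpm --steps 20
# The bfloat16 autocast path typically benefits from CPUs with native bf16
# support (AVX-512/AMX); on older hardware, dropping the autocast block and
# running in float32 is a safe fallback.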
'''simple docstring'''
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a : Dict = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False) -> None:
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
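
# Illustrative only: a couple of checkpoint keys and the HF names rename_key maps
# them to, derived by hand from the replacement rules above (not from a real checkpoint):
#   "backbone.blocks.0.attn.proj.weight" -> "vit.encoder.layer.0.attention.output.dense.weight"
#   "class_embed.weight"                 -> "class_labels_classifier.weight"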
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--yolos_name",
        default="yolos_s_200_pre",
        type=str,
        help=(
            "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
            " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
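
# Example invocation (hedged: the script filename and checkpoint path are placeholders):
#   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#       --checkpoint_path /path/to/yolos_s_200_pre.pth --pytorch_dump_folder_path ./yolos-small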
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs
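
# A hedged illustration of the filtering above: `names` has no default in
# `pd.read_csv`, so it is dropped whenever it still equals the CsvConfig default
# (None). Illustrative helper, not part of the original module:
def _kwargs_filtering_example():
    assert "names" not in CsvConfig().pd_read_csv_kwargs
    assert CsvConfig(names=["a", "b"]).pd_read_csv_kwargs["names"] == ["a", "b"]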
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
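
# Hedged usage sketch (standard `datasets` entry point; the file path is a placeholder):
#   from datasets import load_dataset
#   dataset = load_dataset("csv", data_files={"train": "train.csv"}, sep=";")
# Any CsvConfig field above can be forwarded as a keyword argument this way.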
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # token ids fit in 16 bits only if the vocabulary is smaller than 2**16
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
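
# Example invocation (hedged: file paths are placeholders and the script filename
# follows the distillation example's convention):
#   python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/binarized_text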
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification


logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "adapter_layer": "encoder.layers.*.adapter_layer",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
    "pooling_layer.linear": "projector",
    "pooling_layer.projection": "classifier",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "projector",
    "classifier",
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
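
# Hedged sketch of the expected input format: one label per line, so a file
# containing "down\nup\n" yields {0: "down", 1: "up"} (line number -> first word).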
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
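
# A minimal sketch of the dotted-attribute traversal used above: a key like
# "feature_projection.projection" is resolved one `getattr` at a time from the
# model root, exactly like the first loop in `set_recursively`. (Hedged: the
# default key below is illustrative, taken from MAPPING.)
def _getattr_traversal_example(root, dotted_key="feature_projection.projection"):
    pointer = root
    for attribute in dotted_key.split("."):
        pointer = getattr(pointer, attribute)
    return pointer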
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
    "W_a": "linear_1.weight",
    "W_b": "linear_2.weight",
    "b_a": "linear_1.bias",
    "b_b": "linear_2.bias",
    "ln_W": "norm.weight",
    "ln_b": "norm.bias",
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1,
            sampling_rate=16000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    parser.add_argument(
        "--is_seq_class",
        action="store_true",
        help="Whether the model to convert is a fine-tuned sequence classification model or not",
    )
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
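
# Example invocation for a fine-tuned CTC checkpoint (hedged: paths are placeholders):
#   python convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path wav2vec_small_960h.pt --dict_path dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-base-960h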
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer


TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()

            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])

            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]

            return outputs
@require_tf
@require_keras_nlp
class GPTTokenizationTest(unittest.TestCase):
    # The TF tokenizers are usually going to be used as pretrained tokenizers from existing model checkpoints,
    # so that's what we focus on here.

    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs

            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                    {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                    {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                ]
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
            ],
        )

    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_large_model_tf(self):
        pass

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
            ],
        )

    @require_torch
    @slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
            ],
        )
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_model_doc_toc(model_doc):
    """
    Cleans the table of content of the model documentation by removing duplicates and sorting models alphabetically.
    """
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
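
# A concrete illustration of the dedup above (hand-computed, hedged): two entries
# with the same `local` and identical titles collapse to one, and the result is
# sorted case-insensitively by title:
#   clean_model_doc_toc([
#       {"local": "model_doc/bert", "title": "BERT"},
#       {"local": "model_doc/bert", "title": "BERT"},
#       {"local": "model_doc/albert", "title": "ALBERT"},
#   ])
#   -> [{"local": "model_doc/albert", "title": "ALBERT"}, {"local": "model_doc/bert", "title": "BERT"}]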
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
import inspect

import jax
import jax.lax as lax
import jax.numpy as jnp

from ..utils import add_start_docstrings
from ..utils.logging import get_logger


logger = get_logger(__name__)


LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
            search or log softmax for each vocabulary token when using beam search
        kwargs (`Dict[str, Any]`, *optional*):
            Additional logits processor specific kwargs.

    Return:
        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.

"""
class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for processing logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for warping logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class FlaxLogitsProcessorList(list):
    """
    This class can be used to create a list of [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] to subsequently
    process a `scores` input tensor, applying each processor in order.
    """

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    r"""[`FlaxLogitsWarper`] for temperature (exponential scaling of the output probability distribution)."""

    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    r"""[`FlaxLogitsWarper`] that restricts sampling to the smallest set of tokens whose probabilities sum to `top_p`."""

    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
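
# Worked example of the nucleus filter above (hand-computed, hedged) with top_p=0.9:
# for one row whose descending softmax probabilities are [0.5, 0.3, 0.15, 0.05], the
# cumulative sums are [0.5, 0.8, 0.95, 1.0]; `cumulative_probs < top_p` keeps the
# first two tokens, and the one-position roll additionally keeps the third token
# (the one that first crosses top_p), so only the 0.05 token gets `filter_value`.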
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    r"""[`FlaxLogitsWarper`] that restricts sampling to the `top_k` highest-probability tokens."""

    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    r"""[`FlaxLogitsProcessor`] that enforces the specified token as the first generated token."""

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    r"""[`FlaxLogitsProcessor`] that enforces the specified token as the last token when `max_length` is reached."""

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    r"""[`FlaxLogitsProcessor`] enforcing a min-length by setting the EOS probability to 0."""

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    r"""[`FlaxLogitsProcessor`] suppressing a list of tokens as soon as generation starts (at `begin_index`)."""

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    r"""[`FlaxLogitsProcessor`] suppressing a list of tokens at every decoding step."""

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    r"""[`FlaxLogitsProcessor`] that forces specific tokens at specific generation indices."""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores
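
# Design note with a tiny sketch: a Python `if` on a traced value fails under `jax.jit`,
# so the processor above uses `lax.cond`, which stages both branches for XLA.
# (Illustrative helper, not part of the original module.)
def _lax_cond_example():
    x = jnp.array(3)
    # functionally equivalent to `x + 1 if x >= 0 else x - 1`, but jit-traceable
    return lax.cond(x >= 0, lambda: x + 1, lambda: x - 1)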
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    r"""[`FlaxLogitsProcessor`] that modifies the logits for the generation of timestamps in Whisper."""

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index

        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_module(module):
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil(img):
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir( data_dir , save_dir , model_name , bs=8 , max_source_length=1024 , type_path="val" , n_obs=None , fp16=False , task="summarization" , local_rank=None , num_return_sequences=1 , dataset_kwargs=None , prefix="" , **generate_kwargs , ) -> Dict:
    '''simple docstring'''
    model_name = str(model_name )
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl" , rank=local_rank )
    save_dir = Path(save_dir )
    save_path = save_dir.joinpath(F"rank_{local_rank}_output.json" )
    torch.cuda.set_device(local_rank )
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name ).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model , task )  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams" , model.config.num_beams )  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" )  # if this is wrong, check config.model_type.
    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config , "prefix" , "" ) or ""
    ds = Seq2SeqDataset(
        tokenizer , data_dir , max_source_length , max_target_length=1024 , type_path=type_path , n_obs=n_obs , prefix=prefix , **dataset_kwargs , )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs , distributed=True , add_extra_examples=False , shuffle=True )
    data_loader = DataLoader(ds , sampler=sampler , batch_size=bs , collate_fn=ds.collate_fn )
    results = []
    for batch in tqdm(data_loader ):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=num_return_sequences , num_beams=num_beams , **generate_kwargs , )
        preds = tokenizer.batch_decode(summaries , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds , num_return_sequences )  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds ):
            results.append({"pred": pred, "id": ids[i].item()} )
    save_json(results , save_path )
    return results, sampler.num_replicas
def run_generate():
    '''simple docstring'''
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" )
    parser.add_argument("--data_dir" , type=str , help="like cnn_dm/test.source" )
    parser.add_argument(
        "--model_name" , type=str , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , )
    parser.add_argument("--save_dir" , type=str , help="where to save" , default="tmp_gen" )
    parser.add_argument("--max_source_length" , type=int , default=None )
    parser.add_argument(
        "--type_path" , type=str , default="test" , help="which subset to evaluate typically train/val/test" )
    parser.add_argument("--task" , type=str , default="summarization" , help="used for task_specific_params + metrics" )
    parser.add_argument("--bs" , type=int , default=8 , required=False , help="batch size" )
    parser.add_argument(
        "--local_rank" , type=int , default=-1 , required=False , help="should be passed by distributed.launch" )
    parser.add_argument(
        "--n_obs" , type=int , default=None , required=False , help="How many observations. Defaults to all." )
    parser.add_argument(
        "--num_return_sequences" , type=int , default=1 , required=False , help="How many sequences to return" )
    parser.add_argument(
        "--sync_timeout" , type=int , default=600 , required=False , help="How long should master process wait for other processes to finish." , )
    parser.add_argument("--src_lang" , type=str , default=None , required=False )
    parser.add_argument("--tgt_lang" , type=str , default=None , required=False )
    parser.add_argument(
        "--prefix" , type=str , required=False , default=None , help="will be added to the begininng of src examples" )
    parser.add_argument("--fp16" , action="store_true" )
    parser.add_argument("--debug" , action="store_true" )
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest )
    if generate_kwargs and args.local_rank <= 0:
        print(F"parsed the following generate kwargs: {generate_kwargs}" )
    json_save_dir = Path(args.save_dir + "_tmp" )
    Path(json_save_dir ).mkdir(exist_ok=True )  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json" ) )
    if intermediate_files:
        raise ValueError(F"Found files at {json_save_dir} please move or remove them." )
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang
    Path(args.save_dir ).mkdir(exist_ok=True )
    results, num_replicas = eval_data_dir(
        args.data_dir , json_save_dir , args.model_name , type_path=args.type_path , bs=args.bs , fp16=args.fp16 , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=dataset_kwargs , **generate_kwargs , )
    if args.local_rank <= 0:
        save_dir = Path(args.save_dir )
        save_dir.mkdir(exist_ok=True )
        partial_results = gather_results_from_each_node(num_replicas , json_save_dir , args.sync_timeout )
        preds = combine_partial_results(partial_results )
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json" )
            print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" )
            save_json(preds , save_path )
            return
        tgt_file = Path(args.data_dir ).joinpath(args.type_path + ".target" )
        with open(tgt_file ) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds )]
        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics = score_fn(preds , labels )
        metrics["n_obs"] = len(preds )
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"] , 4 )
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" )
        save_json(metrics , metrics_save_path , indent=None )
        print(metrics )
        write_txt_file(preds , save_dir.joinpath(F"{args.type_path}_generations.txt" ) )
        if args.debug:
            write_txt_file(labels , save_dir.joinpath(F"{args.type_path}.target" ) )
        else:
            shutil.rmtree(json_save_dir )
def combine_partial_results(partial_results ) -> List:
    '''simple docstring'''
    records = []
    for partial_result in partial_results:
        records.extend(partial_result )
    records = sorted(records , key=lambda x: x["id"] )
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node(num_replicas , save_dir , timeout ) -> List[Dict[str, List]]:
    '''simple docstring'''
    start_wait = time.time()
    logger.info("waiting for all nodes to finish" )
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json" ) )
        if len(json_files ) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json , json_files )
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
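# A minimal single-process sketch of the rank-file handshake above: every
# worker writes rank_<i>_output.json and rank 0 gathers them once all
# num_replicas files exist. Simulated here without torch.distributed; the
# file contents are illustrative.
def _rank_file_handshake_demo():
    import json
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        demo_dir = Path(tmp )
        num_replicas = 3
        for rank in range(num_replicas ):  # simulate each worker's save_json call
            (demo_dir / f"rank_{rank}_output.json").write_text(
                json.dumps([{"pred": f"hypothesis {rank}", "id": rank}] ) )
        files = sorted(demo_dir.glob("rank_*.json" ) )
        assert len(files ) == num_replicas
        records = [r for f in files for r in json.loads(f.read_text() )]
        return [r["pred"] for r in sorted(records , key=lambda x: x["id"] )]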
| 311
| 0
|
def greatest_common_divisor(x: int , y: int ) -> int:
    """simple docstring"""
    return x if y == 0 else greatest_common_divisor(y , x % y )
def lcm(x: int , y: int ) -> int:
    """simple docstring"""
    return (x * y) // greatest_common_divisor(x , y )
def solution(n: int = 20 ) -> int:
    """simple docstring"""
    g = 1
    for i in range(1 , n + 1 ):
        g = lcm(g , i )
    return g
if __name__ == "__main__":
print(f"{solution() = }")
| 205
|
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name ):
    '''simple docstring'''
    if "emb" in name:
        name = name.replace("emb" , "model.decoder.embed_tokens" )
    if "transformer" in name:
        name = name.replace("transformer" , "model.decoder" )
    if "cross_attention" in name:
        name = name.replace("cross_attention" , "encoder_attn" )
    if "linear1" in name:
        name = name.replace("linear1" , "fc1" )
    if "linear2" in name:
        name = name.replace("linear2" , "fc2" )
    if "norm1" in name:
        name = name.replace("norm1" , "self_attn_layer_norm" )
    if "norm_cross" in name:
        name = name.replace("norm_cross" , "encoder_attn_layer_norm" )
    if "norm2" in name:
        name = name.replace("norm2" , "final_layer_norm" )
    if "out_norm" in name:
        name = name.replace("out_norm" , "model.decoder.layer_norm" )
    if "linears" in name:
        name = name.replace("linears" , "lm_heads" )
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
    return name
def rename_state_dict(state_dict , hidden_size ):
    '''simple docstring'''
    keys = list(state_dict.keys() )
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key )
        key = rename_keys(key )
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight" , "q_proj.weight" )] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight" , "k_proj.weight" )] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight" , "v_proj.weight" )] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj." ) :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint ):
    '''simple docstring'''
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(F"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." )
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size , ffn_dim=hidden_size * 4 , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint , pytorch_dump_folder=None , repo_id=None , device="cpu" ):
    '''simple docstring'''
    fairseq_model = MusicGen.get_pretrained(checkpoint , device=device )
    decoder_config = decoder_config_from_checkpoint(checkpoint )
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict , hidden_size=decoder_config.hidden_size )
    text_encoder = T5EncoderModel.from_pretrained("t5-base" )
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz" )
    decoder = MusicgenForCausalLM(decoder_config ).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict , strict=False )
    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key )
    if len(missing_keys ) > 0:
        raise ValueError(F"Missing key(s) in state_dict: {missing_keys}" )
    if len(unexpected_keys ) > 0:
        raise ValueError(F"Unexpected key(s) in state_dict: {unexpected_keys}" )
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder , audio_encoder=audio_encoder , decoder=decoder )
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict )
    # check we can do a forward pass
    input_ids = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
    decoder_input_ids = input_ids.reshape(2 * 4 , -1 )
    with torch.no_grad():
        logits = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids ).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits" )
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base" )
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
    processor = MusicgenProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate )
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder ).mkdir(exist_ok=True )
        logger.info(F"Saving model {checkpoint} to {pytorch_dump_folder}" )
        model.save_pretrained(pytorch_dump_folder )
        processor.save_pretrained(pytorch_dump_folder )
    if repo_id:
        logger.info(F"Pushing model {checkpoint} to {repo_id}" )
        model.push_to_hub(repo_id )
        processor.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
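# A minimal sketch of the fused-QKV split performed in rename_state_dict: a
# (3 * hidden, hidden) in_proj_weight is cut into equal q/k/v blocks along
# dim 0 (the tensor values here are illustrative).
def _qkv_split_demo(hidden_size=4 ):
    in_proj_weight = torch.arange(3 * hidden_size * hidden_size , dtype=torch.float32 ).reshape(
        3 * hidden_size , hidden_size )
    q = in_proj_weight[:hidden_size, :]
    k = in_proj_weight[hidden_size : 2 * hidden_size, :]
    v = in_proj_weight[-hidden_size:, :]
    assert torch.equal(torch.cat([q, k, v] , dim=0 ) , in_proj_weight )
    return q, k, v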
| 311
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]
def compute_nums(n: int ) -> list[int]:
    if not isinstance(n , int ):
        raise ValueError("""n must be an integer""" )
    if n <= 0:
        raise ValueError("""n must be >= 0""" )
    list_nums = []
    for num in range(len(odd_composites ) ):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rest = odd_composites[num] - 2 * i * i
            if is_prime(rest ):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num] )
        if len(list_nums ) == n:
            return list_nums
    return []
def solution() -> int:
    return compute_nums(1 )[0]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 242
|
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[str] = inspect.getfile(accelerate.test_utils )
UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
UpperCAmelCase : Optional[int] = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] )
UpperCAmelCase : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
print(f"Found {torch.cuda.device_count()} devices." )
UpperCAmelCase : Any = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
print(f"Found {torch.cuda.device_count()} devices." )
UpperCAmelCase : Tuple = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
print(f"Command: {cmd}" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self ):
'''simple docstring'''
print(f"Found {torch.cuda.device_count()} devices, using 2 devices only" )
UpperCAmelCase : str = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ):
execute_subprocess_async(snake_case , env=os.environ.copy() )
if __name__ == "__main__":
accelerator = Accelerator()
shape = (accelerator.state.process_index + 2, 10)
tensor = torch.randint(0, 10, shape).to(accelerator.device)
error_msg = ""
tensora = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
tensora = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
index = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
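# Plain-torch sketch of the behaviour verified above: pad_across_processes
# aligns dim 0 to a common length, putting zeros after the real values by
# default or before them with pad_first=True (the helper name is illustrative).
def _pad_dim0(t , target_len , pad_first=False ):
    pad = torch.zeros(target_len - t.shape[0] , *t.shape[1:] , dtype=t.dtype )
    return torch.cat([pad, t] if pad_first else [t, pad] , dim=0 )
_t = torch.ones(2 , 3 )
assert torch.all(_pad_dim0(_t , 4 )[2:] == 0 )
assert torch.all(_pad_dim0(_t , 4 , pad_first=True )[:2] == 0 )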
| 311
| 0
|
"""simple docstring"""
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def A__ ( UpperCamelCase ):
return x + 2
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self :int ):
A = "x = 3"
A = {}
A = evaluate(__UpperCamelCase , {} , state=__UpperCamelCase )
assert result == 3
self.assertDictEqual(__UpperCamelCase , {"x": 3} )
A = "x = y"
A = {"y": 5}
A = evaluate(__UpperCamelCase , {} , state=__UpperCamelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__UpperCamelCase , {"x": 5, "y": 5} )
def lowerCamelCase ( self :Optional[int] ):
A = "y = add_two(x)"
A = {"x": 3}
A = evaluate(__UpperCamelCase , {"add_two": add_two} , state=__UpperCamelCase )
assert result == 5
self.assertDictEqual(__UpperCamelCase , {"x": 3, "y": 5} )
# Won't work without the tool
with CaptureStdout() as out:
A = evaluate(__UpperCamelCase , {} , state=__UpperCamelCase )
assert result is None
assert "tried to execute add_two" in out.out
def lowerCamelCase ( self :Any ):
A = "x = 3"
A = {}
A = evaluate(__UpperCamelCase , {} , state=__UpperCamelCase )
assert result == 3
self.assertDictEqual(__UpperCamelCase , {"x": 3} )
def lowerCamelCase ( self :Dict ):
A = "test_dict = {'x': x, 'y': add_two(x)}"
A = {"x": 3}
A = evaluate(__UpperCamelCase , {"add_two": add_two} , state=__UpperCamelCase )
self.assertDictEqual(__UpperCamelCase , {"x": 3, "y": 5} )
self.assertDictEqual(__UpperCamelCase , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def lowerCamelCase ( self :Union[str, Any] ):
A = "x = 3\ny = 5"
A = {}
A = evaluate(__UpperCamelCase , {} , state=__UpperCamelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__UpperCamelCase , {"x": 3, "y": 5} )
def lowerCamelCase ( self :Union[str, Any] ):
A = "text = f'This is x: {x}.'"
A = {"x": 3}
A = evaluate(__UpperCamelCase , {} , state=__UpperCamelCase )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(__UpperCamelCase , {"x": 3, "text": "This is x: 3."} )
def lowerCamelCase ( self :List[str] ):
A = "if x <= 3:\n y = 2\nelse:\n y = 5"
A = {"x": 3}
A = evaluate(__UpperCamelCase , {} , state=__UpperCamelCase )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(__UpperCamelCase , {"x": 3, "y": 2} )
A = {"x": 8}
A = evaluate(__UpperCamelCase , {} , state=__UpperCamelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__UpperCamelCase , {"x": 8, "y": 5} )
def lowerCamelCase ( self :List[Any] ):
A = "test_list = [x, add_two(x)]"
A = {"x": 3}
A = evaluate(__UpperCamelCase , {"add_two": add_two} , state=__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , [3, 5] )
self.assertDictEqual(__UpperCamelCase , {"x": 3, "test_list": [3, 5]} )
def lowerCamelCase ( self :Dict ):
A = "y = x"
A = {"x": 3}
A = evaluate(__UpperCamelCase , {} , state=__UpperCamelCase )
assert result == 3
self.assertDictEqual(__UpperCamelCase , {"x": 3, "y": 3} )
def lowerCamelCase ( self :List[Any] ):
A = "test_list = [x, add_two(x)]\ntest_list[1]"
A = {"x": 3}
A = evaluate(__UpperCamelCase , {"add_two": add_two} , state=__UpperCamelCase )
assert result == 5
self.assertDictEqual(__UpperCamelCase , {"x": 3, "test_list": [3, 5]} )
A = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
A = {"x": 3}
A = evaluate(__UpperCamelCase , {"add_two": add_two} , state=__UpperCamelCase )
assert result == 5
self.assertDictEqual(__UpperCamelCase , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def lowerCamelCase ( self :Optional[int] ):
A = "x = 0\nfor i in range(3):\n x = i"
A = {}
A = evaluate(__UpperCamelCase , {"range": range} , state=__UpperCamelCase )
assert result == 2
self.assertDictEqual(__UpperCamelCase , {"x": 2, "i": 2} )
| 292
|
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase__ :
"""simple docstring"""
@staticmethod
def A_ ( *snake_case , **snake_case ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : str = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
UpperCAmelCase : Union[str, Any] = [
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
]
return object_detector, examples
def A_ ( self , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : List[Any] = object_detector(examples[0] , threshold=0.0 )
UpperCAmelCase : Dict = len(snake_case )
self.assertGreater(snake_case , 0 )
self.assertEqual(
snake_case , [
{
"score": ANY(snake_case ),
"label": ANY(snake_case ),
"box": {"xmin": ANY(snake_case ), "ymin": ANY(snake_case ), "xmax": ANY(snake_case ), "ymax": ANY(snake_case )},
}
for i in range(snake_case )
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def A_ ( self ):
'''simple docstring'''
pass
@require_torch
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
UpperCAmelCase : Optional[Any] = object_detector(
"./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
] , )
UpperCAmelCase : Tuple = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
[
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
]
] , )
@require_torch
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = pipeline("zero-shot-object-detection" )
UpperCAmelCase : Optional[int] = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
] , )
UpperCAmelCase : Union[str, Any] = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
] , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def A_ ( self ):
'''simple docstring'''
pass
@require_torch
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = 0.2
UpperCAmelCase : Union[str, Any] = pipeline("zero-shot-object-detection" )
UpperCAmelCase : str = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=snake_case , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
] , )
@require_torch
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = 2
UpperCAmelCase : Optional[Any] = pipeline("zero-shot-object-detection" )
UpperCAmelCase : List[str] = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=snake_case , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
] , )
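# Hedged usage sketch of the pipeline exercised above (downloads the tiny
# test checkpoint; the image URL matches the slow tests).
def _zero_shot_detection_demo():
    detector = pipeline(
        "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
    preds = detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote"] , threshold=0.0 , )
    return sorted(preds[0].keys() )  # ['box', 'label', 'score']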
| 311
| 0
|
from __future__ import annotations
class Node:
    """simple docstring"""
    def __init__( self , data ) -> None:
        '''simple docstring'''
        self.data = data
        self.left = None
        self.right = None
def display( tree ):  # In Order traversal of the tree
    '''simple docstring'''
    if tree:
        display(tree.left )
        print(tree.data )
        display(tree.right )
def depth_of_tree( tree ):
    '''simple docstring'''
    return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def is_full_binary_tree( tree ):
    '''simple docstring'''
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
    else:
        return not tree.left and not tree.right
def main():  # Main function for testing.
    '''simple docstring'''
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    tree.left.right.left = Node(6 )
    tree.right.left = Node(7 )
    tree.right.left.left = Node(8 )
    tree.right.left.left.right = Node(9 )
    print(is_full_binary_tree(tree ) )
    print(depth_of_tree(tree ) )
    print("""Tree is: """ )
    display(tree )
if __name__ == "__main__":
main()
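# Tiny check of the predicate above: a lone node is full, a node with exactly
# one child is not, and adding the second child makes it full again.
def _full_tree_demo():
    root = Node(1 )
    assert is_full_binary_tree(root )
    root.left = Node(2 )
    assert not is_full_binary_tree(root )
    root.right = Node(3 )
    assert is_full_binary_tree(root )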
| 252
|
'''simple docstring'''
def twos_complement(number: int ) -> str:
    '''simple docstring'''
    if number > 0:
        raise ValueError("input must be a negative integer" )
    binary_number_length = len(bin(number )[3:] )
    twos_complement_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
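# Worked example: bin(-5) == '-0b101', so the magnitude needs 3 bits and the
# helper prepends the sign bit, giving the 4-bit two's complement '0b1011'.
assert twos_complement(-5 ) == "0b1011"
assert twos_complement(0 ) == "0b0"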
| 311
| 0
|
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def a ( lowerCamelCase_ ):
'''simple docstring'''
if hor == 128:
lowercase__ = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
lowercase__ = (32, 128, 256)
lowercase__ = ("UpResnetBlock1D", "UpResnetBlock1D")
elif hor == 32:
lowercase__ = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
lowercase__ = (32, 64, 128, 256)
lowercase__ = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
lowercase__ = torch.load(F"""/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch""" )
lowercase__ = model.state_dict()
lowercase__ = {
"down_block_types": down_block_types,
"block_out_channels": block_out_channels,
"up_block_types": up_block_types,
"layers_per_block": 1,
"use_timestep_embedding": True,
"out_block_type": "OutConv1DBlock",
"norm_num_groups": 8,
"downsample_each_block": False,
"in_channels": 14,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"flip_sin_to_cos": False,
"freq_shift": 1,
"sample_size": 6_5536,
"mid_block_type": "MidResTemporalBlock1D",
"act_fn": "mish",
}
lowercase__ = UNetaDModel(**lowerCamelCase_ )
print(F"""length of state dict: {len(state_dict.keys() )}""" )
print(F"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
lowercase__ = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowercase__ = state_dict.pop(lowerCamelCase_ )
hf_value_function.load_state_dict(lowerCamelCase_ )
torch.save(hf_value_function.state_dict() , F"""hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin""" )
with open(F"""hub/hopper-medium-v2/unet/hor{hor}/config.json""" , '''w''' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
def a ( ):
'''simple docstring'''
lowercase__ = {
"in_channels": 14,
"down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
"up_block_types": (),
"out_block_type": "ValueFunction",
"mid_block_type": "ValueFunctionMidBlock1D",
"block_out_channels": (32, 64, 128, 256),
"layers_per_block": 1,
"downsample_each_block": True,
"sample_size": 6_5536,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"use_timestep_embedding": True,
"flip_sin_to_cos": False,
"freq_shift": 1,
"norm_num_groups": 8,
"act_fn": "mish",
}
lowercase__ = torch.load('''/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch''' )
lowercase__ = model
lowercase__ = UNetaDModel(**lowerCamelCase_ )
print(F"""length of state dict: {len(state_dict.keys() )}""" )
print(F"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
lowercase__ = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowercase__ = state_dict.pop(lowerCamelCase_ )
hf_value_function.load_state_dict(lowerCamelCase_ )
torch.save(hf_value_function.state_dict() , '''hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin''' )
with open('''hub/hopper-medium-v2/value_function/config.json''' , '''w''' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
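# Minimal sketch of the positional key-remapping trick used in both
# conversions above: zip the source keys onto the target keys in order, then
# repack the values (the dicts here are illustrative).
def _remap_demo():
    src = {"w1": 1, "w2": 2}
    dst_keys = ["layers.0.weight", "layers.1.weight"]
    mapping = dict(zip(src.keys() , dst_keys ) )
    remapped = {mapping[k]: src[k] for k in src}
    assert remapped == {"layers.0.weight": 1, "layers.1.weight": 2}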
| 207
|
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a , b ):
    '''simple docstring'''
    return np.linalg.norm(np.array(a ) - np.array(b ) )
def classifier(train_data , train_target , classes , point , k=5 ):
    '''simple docstring'''
    data = zip(train_data , train_target )
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0] , point )
        distances.append((distance, data_point[1]) )
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances )[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes ).most_common(1 )[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
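# Toy check on a 1-D dataset with an obvious neighbourhood: the three nearest
# points to 9.9 all carry label 1, so the second class wins the vote.
toy_X = [[0.0], [0.1], [10.0], [10.1], [10.2]]
toy_y = [0, 0, 1, 1, 1]
assert classifier(toy_X , toy_y , ["a", "b"] , [9.9] , k=3 ) == "b"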
| 311
| 0
|
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
lowercase_ = get_tests_dir("fixtures/dummy-config.json")
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = 0
def __UpperCAmelCase ( self ):
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec('''transformers.models.auto''' ) )
def __UpperCAmelCase ( self ):
__a = AutoConfig.from_pretrained('''bert-base-uncased''' )
self.assertIsInstance(_a , _a )
def __UpperCAmelCase ( self ):
__a = AutoConfig.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __UpperCAmelCase ( self ):
__a = AutoConfig.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __UpperCAmelCase ( self ):
__a = AutoConfig.for_model('''roberta''' )
self.assertIsInstance(_a , _a )
def __UpperCAmelCase ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
__a = os.path.join(_a , '''fake-roberta''' )
os.makedirs(_a , exist_ok=_a )
with open(os.path.join(_a , '''config.json''' ) , '''w''' ) as f:
f.write(json.dumps({} ) )
__a = AutoConfig.from_pretrained(_a )
self.assertEqual(type(_a ) , _a )
def __UpperCAmelCase ( self ):
try:
AutoConfig.register('''custom''' , _a )
# Wrong model type will raise an error
with self.assertRaises(_a ):
AutoConfig.register('''model''' , _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoConfig.register('''bert''' , _a )
# Now that the config is registered, it can be used as any other config with the auto-API
__a = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_a )
__a = AutoConfig.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def __UpperCAmelCase ( self ):
with self.assertRaisesRegex(
_a , '''bert-base is not a local folder and is not a valid model identifier''' ):
__a = AutoConfig.from_pretrained('''bert-base''' )
def __UpperCAmelCase ( self ):
with self.assertRaisesRegex(
_a , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
__a = AutoConfig.from_pretrained(_a , revision='''aaaaaa''' )
def __UpperCAmelCase ( self ):
with self.assertRaisesRegex(
_a , '''hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.''' , ):
__a = AutoConfig.from_pretrained('''hf-internal-testing/no-config-test-repo''' )
def __UpperCAmelCase ( self ):
with self.assertRaises(_a ):
__a = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
__a = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=_a )
__a = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=_a )
self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_a )
__a = AutoConfig.from_pretrained(_a , trust_remote_code=_a )
self.assertEqual(reloaded_config.__class__.__name__ , '''NewModelConfig''' )
def __UpperCAmelCase ( self ):
class __lowerCAmelCase ( lowercase__ ):
'''simple docstring'''
__UpperCAmelCase : str = "new-model"
try:
AutoConfig.register('''new-model''' , _a )
# If remote code is not set, the default is to use local
__a = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' )
self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' )
# If remote code is disabled, we load the local one.
__a = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=_a )
self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' )
# If remote is enabled, we load from the Hub
__a = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=_a )
self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 45
|
'''simple docstring'''
def is_power_of_two(number: int ) -> bool:
    '''simple docstring'''
    if number < 0:
        raise ValueError("number must not be negative" )
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
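# n & (n - 1) clears the lowest set bit, so the expression above is zero
# exactly for powers of two (& binds tighter than == in Python).
assert [n for n in range(1 , 20 ) if is_power_of_two(n )] == [1, 2, 4, 8, 16]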
| 311
| 0
|
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _A ( lowercase__ ):
snake_case__ : Optional[int] = (DDIMParallelScheduler,)
snake_case__ : Optional[Any] = (("eta", 0.0), ("num_inference_steps", 50))
def A__ ( self , **__lowerCAmelCase ):
"""simple docstring"""
lowercase = {
"num_train_timesteps": 1000,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**__lowerCAmelCase )
return config
def A__ ( self , **__lowerCAmelCase ):
"""simple docstring"""
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(**__lowerCAmelCase )
lowercase = scheduler_class(**__lowerCAmelCase )
lowercase = 10, 0.0
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter
scheduler.set_timesteps(__lowerCAmelCase )
for t in scheduler.timesteps:
lowercase = model(__lowerCAmelCase , __lowerCAmelCase )
lowercase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample
return sample
def A__ ( self ):
"""simple docstring"""
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__lowerCAmelCase )
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(steps_offset=1 )
lowercase = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def A__ ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=__lowerCAmelCase , beta_end=__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
self.check_over_configs(thresholding=__lowerCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=__lowerCAmelCase , prediction_type=__lowerCAmelCase , sample_max_value=__lowerCAmelCase , )
def A__ ( self ):
"""simple docstring"""
for t in [1, 10, 49]:
self.check_over_forward(time_step=__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=__lowerCAmelCase , num_inference_steps=__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=__lowerCAmelCase , eta=__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**__lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.1_4_7_7_1 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.3_2_4_6_0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.0_0_9_7_9 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.0_2 ) ) < 1E-5
def A__ ( self ):
"""simple docstring"""
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**__lowerCAmelCase )
lowercase = 10, 0.0
scheduler.set_timesteps(__lowerCAmelCase )
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter
lowercase = self.dummy_sample_deter + 0.1
lowercase = self.dummy_sample_deter - 0.1
lowercase = samplea.shape[0]
lowercase = torch.stack([samplea, samplea, samplea] , dim=0 )
lowercase = torch.arange(__lowerCAmelCase )[0:3, None].repeat(1 , __lowerCAmelCase )
lowercase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
lowercase = scheduler.batch_step_no_noise(__lowerCAmelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , __lowerCAmelCase )
lowercase = torch.sum(torch.abs(__lowerCAmelCase ) )
lowercase = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_1_4_7.7_9_0_4 ) < 1E-2
assert abs(result_mean.item() - 0.4_9_8_2 ) < 1E-3
def A__ ( self ):
"""simple docstring"""
lowercase = self.full_loop()
lowercase = torch.sum(torch.abs(__lowerCAmelCase ) )
lowercase = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_7_2.0_0_6_7 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_9_6_7 ) < 1E-3
def A__ ( self ):
"""simple docstring"""
lowercase = self.full_loop(prediction_type="""v_prediction""" )
lowercase = torch.sum(torch.abs(__lowerCAmelCase ) )
lowercase = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 5_2.5_3_0_2 ) < 1E-2
assert abs(result_mean.item() - 0.0_6_8_4 ) < 1E-3
def A__ ( self ):
"""simple docstring"""
lowercase = self.full_loop(set_alpha_to_one=__lowerCAmelCase , beta_start=0.0_1 )
lowercase = torch.sum(torch.abs(__lowerCAmelCase ) )
lowercase = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_4_9.8_2_9_5 ) < 1E-2
assert abs(result_mean.item() - 0.1_9_5_1 ) < 1E-3
def A__ ( self ):
"""simple docstring"""
lowercase = self.full_loop(set_alpha_to_one=__lowerCAmelCase , beta_start=0.0_1 )
lowercase = torch.sum(torch.abs(__lowerCAmelCase ) )
lowercase = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_4_9.0_7_8_4 ) < 1E-2
assert abs(result_mean.item() - 0.1_9_4_1 ) < 1E-3
| 197
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def lowercase ( __magic_name__ , __magic_name__=10 ):
'''simple docstring'''
UpperCAmelCase : Tuple = []
for _ in range(__magic_name__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def lowercase ( __magic_name__ , __magic_name__=10 ):
'''simple docstring'''
UpperCAmelCase : List[str] = []
for step in range(__magic_name__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Any = os.path.join(__magic_name__ , "schedule.bin" )
torch.save(scheduler.state_dict() , __magic_name__ )
UpperCAmelCase : Any = torch.load(__magic_name__ )
scheduler.load_state_dict(__magic_name__ )
return lrs
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
self.assertEqual(len(snake_case ) , len(snake_case ) )
for a, b in zip(snake_case , snake_case ):
self.assertAlmostEqual(snake_case , snake_case , delta=snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case )
UpperCAmelCase : Any = torch.tensor([0.4, 0.2, -0.5] )
UpperCAmelCase : Any = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
UpperCAmelCase : List[str] = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(1_0_0 ):
UpperCAmelCase : List[Any] = criterion(snake_case , snake_case )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case )
UpperCAmelCase : int = torch.tensor([0.4, 0.2, -0.5] )
UpperCAmelCase : str = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
UpperCAmelCase : str = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=snake_case , weight_decay=0.0 , relative_step=snake_case , scale_parameter=snake_case , warmup_init=snake_case , )
for _ in range(1_0_0_0 ):
UpperCAmelCase : str = criterion(snake_case , snake_case )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = nn.Linear(50 , 50 ) if is_torch_available() else None
SCREAMING_SNAKE_CASE__ : List[Any] = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
SCREAMING_SNAKE_CASE__ : Optional[int] = 10
def A_ ( self , snake_case , snake_case , snake_case , snake_case=None ):
'''simple docstring'''
self.assertEqual(len(snake_case ) , len(snake_case ) )
for a, b in zip(snake_case , snake_case ):
self.assertAlmostEqual(snake_case , snake_case , delta=snake_case , msg=snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : int = {"num_warmup_steps": 2, "num_training_steps": 1_0}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
UpperCAmelCase : int = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
UpperCAmelCase , UpperCAmelCase : Any = data
UpperCAmelCase : Tuple = scheduler_func(self.optimizer , **snake_case )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
UpperCAmelCase : List[str] = unwrap_schedule(snake_case , self.num_steps )
self.assertListAlmostEqual(
snake_case , snake_case , tol=1e-2 , msg=f"failed for {scheduler_func} in normal scheduler" , )
UpperCAmelCase : Optional[Any] = scheduler_func(self.optimizer , **snake_case )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(snake_case ) # wrap to test picklability of the schedule
UpperCAmelCase : Tuple = unwrap_and_save_reload_schedule(snake_case , self.num_steps )
self.assertListEqual(snake_case , snake_case , msg=f"failed for {scheduler_func} in save and reload" )
class LambdaScheduleWrapper:
    """simple docstring"""

    def __init__( self , fn ):
        '''simple docstring'''
        self.fn = fn

    def __call__( self , *args , **kwargs ):
        '''simple docstring'''
        return self.fn(*args , **kwargs )

    @classmethod
    def wrap_scheduler( cls , scheduler ):
        '''simple docstring'''
        scheduler.lr_lambdas = list(map(cls , scheduler.lr_lambdas ) )
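# Hedged note on the wrapper above: torch's LambdaLR only serializes
# lr_lambdas that are callable *objects* (plain functions and lambdas are
# skipped), so wrapping each lambda in LambdaScheduleWrapper is what lets the
# save/reload round-trip exercised in the test above carry the schedule state.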
from collections.abc import Callable

import numpy as np


def __snake_case ( ode_func: Callable , ya: float , xa: float , step_size: float , x_end: float ) -> np.ndarray:
    """
    Explicit (forward) Euler method: integrate y' = ode_func(x, y) from the
    initial condition y(xa) = ya up to x_end, using a fixed step size.
    """
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        y[k + 1] = y[k] + step_size * ode_func(x , y[k] )
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
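# A quick sanity check, written as a hedged sketch (using this file's own
# solver name): with ode_func(x, y) = y and y(0) = 1, the iterates should
# approach e at x = 1 as step_size shrinks.
#
#     approx = __snake_case(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)[-1]
#     abs(approx - 2.71828) < 0.01  # True at this step size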
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"
def shift_tokens_right(input_ids , pad_token_id , decoder_start_token_id ):
    '''simple docstring'''
    shifted_input_ids = jnp.zeros_like(input_ids )
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id )
    # replace possible -100 values in labels by `pad_token_id`
    shifted_input_ids = jnp.where(shifted_input_ids == -100 , pad_token_id , shifted_input_ids )
    return shifted_input_ids
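# Illustrative check (hypothetical ids): with pad_token_id=0 and
# decoder_start_token_id=0, input_ids [[5, 6, 7]] shifts to [[0, 5, 6]] --
# every token moves one slot right, the decoder start token fills slot 0, and
# any -100 label placeholders would be replaced by the pad id.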
class FlaxMTaModel(FlaxTaModel ):
    r"""simple docstring"""

    model_type = "mt5"
    config_class = MTaConfig


class FlaxMTaEncoderModel(FlaxTaEncoderModel ):
    r"""simple docstring"""

    model_type = "mt5"
    config_class = MTaConfig


class FlaxMTaForConditionalGeneration(FlaxTaForConditionalGeneration ):
    r"""simple docstring"""

    model_type = "mt5"
    config_class = MTaConfig
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ (lowercase__ , unittest.TestCase ):
__lowerCamelCase : Union[str, Any] = DebertaVaTokenizer
__lowerCamelCase : Optional[int] = DebertaVaTokenizerFast
__lowerCamelCase : str = True
__lowerCamelCase : List[Any] = True
    def snake_case_ ( self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , unk_token='<unk>')
        tokenizer.save_pretrained(self.tmpdirname)
def snake_case_ ( self , a):
lowercase__ : List[str] = "this is a test"
lowercase__ : str = "this is a test"
return input_text, output_text
def snake_case_ ( self):
lowercase__ : Dict = "<pad>"
lowercase__ : str = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a) , a)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a) , a)
def snake_case_ ( self):
lowercase__ : Dict = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<pad>')
self.assertEqual(vocab_keys[1] , '<unk>')
self.assertEqual(vocab_keys[-1] , '[PAD]')
self.assertEqual(len(a) , 3_0001)
def snake_case_ ( self):
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000)
def snake_case_ ( self):
lowercase__ : Union[str, Any] = " \tHeLLo!how \n Are yoU? "
lowercase__ : Dict = ["▁hello", "!", "how", "▁are", "▁you", "?"]
# fmt: on
lowercase__ : Dict = DebertaVaTokenizer(a , do_lower_case=a)
lowercase__ : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a))
self.assertListEqual(a , a)
lowercase__ : str = DebertaVaTokenizerFast(a , do_lower_case=a)
lowercase__ : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a))
self.assertListEqual(a , a)
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
def snake_case_ ( self):
pass
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
def snake_case_ ( self):
pass
def snake_case_ ( self):
lowercase__ : List[Any] = "I was born in 92000, and this is falsé."
lowercase__ : Dict = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
lowercase__ : List[str] = DebertaVaTokenizer(a , split_by_punct=a)
lowercase__ : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a))
self.assertListEqual(a , a)
lowercase__ : List[str] = DebertaVaTokenizerFast(a , split_by_punct=a)
lowercase__ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a))
self.assertListEqual(a , a)
def snake_case_ ( self):
lowercase__ : Optional[int] = "I was born in 92000, and this is falsé."
lowercase__ : Optional[Any] = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
lowercase__ : int = DebertaVaTokenizer(a , do_lower_case=a , split_by_punct=a)
lowercase__ : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a))
self.assertListEqual(a , a)
lowercase__ : Dict = DebertaVaTokenizerFast(a , do_lower_case=a , split_by_punct=a)
lowercase__ : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a))
self.assertListEqual(a , a)
def snake_case_ ( self):
lowercase__ : Dict = "I was born in 92000, and this is falsé."
lowercase__ : Optional[Any] = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
lowercase__ : int = DebertaVaTokenizer(a , do_lower_case=a , split_by_punct=a)
lowercase__ : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a))
self.assertListEqual(a , a)
lowercase__ : int = DebertaVaTokenizerFast(a , do_lower_case=a , split_by_punct=a)
lowercase__ : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a))
self.assertListEqual(a , a)
def snake_case_ ( self):
lowercase__ : Dict = "I was born in 92000, and this is falsé."
lowercase__ : Union[str, Any] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
lowercase__ : Optional[Any] = DebertaVaTokenizer(a , do_lower_case=a , split_by_punct=a)
lowercase__ : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a))
self.assertListEqual(a , a)
lowercase__ : List[str] = DebertaVaTokenizerFast(a , do_lower_case=a , split_by_punct=a)
lowercase__ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a))
self.assertListEqual(a , a)
def snake_case_ ( self):
lowercase__ : Union[str, Any] = " \tHeLLo!how \n Are yoU? "
lowercase__ : Any = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
lowercase__ : Tuple = DebertaVaTokenizer(a , do_lower_case=a , split_by_punct=a)
lowercase__ : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a))
self.assertListEqual(a , a)
lowercase__ : List[str] = DebertaVaTokenizerFast(a , do_lower_case=a , split_by_punct=a)
lowercase__ : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a))
self.assertListEqual(a , a)
def snake_case_ ( self):
lowercase__ : str = self.get_tokenizer()
lowercase__ : Union[str, Any] = self.get_rust_tokenizer()
lowercase__ : Dict = "I was born in 92000, and this is falsé."
lowercase__ : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a))
lowercase__ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a))
self.assertListEqual(a , a)
lowercase__ : Tuple = tokenizer.encode(a , add_special_tokens=a)
lowercase__ : Union[str, Any] = rust_tokenizer.encode(a , add_special_tokens=a)
self.assertListEqual(a , a)
lowercase__ : int = self.get_rust_tokenizer()
lowercase__ : Tuple = tokenizer.encode(a)
lowercase__ : List[Any] = rust_tokenizer.encode(a)
self.assertListEqual(a , a)
def snake_case_ ( self):
lowercase__ : List[Any] = "This is a test"
lowercase__ : Tuple = [13, 1, 4398, 25, 21, 1289]
lowercase__ : Any = ["▁", "T", "his", "▁is", "▁a", "▁test"]
lowercase__ : Union[str, Any] = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
lowercase__ : Dict = DebertaVaTokenizer(a , keep_accents=a)
lowercase__ : List[str] = DebertaVaTokenizerFast(a , keep_accents=a)
lowercase__ : str = tokenizer.encode(a , add_special_tokens=a)
self.assertListEqual(a , a)
lowercase__ : Optional[Any] = tokenizer.tokenize(a)
self.assertListEqual(a , a)
lowercase__ : Tuple = tokenizer.convert_ids_to_tokens(a)
self.assertListEqual(a , a)
lowercase__ : Dict = rust_tokenizer.encode(a , add_special_tokens=a)
self.assertListEqual(a , a)
lowercase__ : Optional[int] = rust_tokenizer.tokenize(a)
self.assertListEqual(a , a)
lowercase__ : Dict = rust_tokenizer.convert_ids_to_tokens(a)
self.assertListEqual(a , a)
# fmt: off
lowercase__ : Tuple = "I was born in 92000, and this is falsé."
lowercase__ : List[str] = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
lowercase__ : List[Any] = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
lowercase__ : Union[str, Any] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
lowercase__ : List[str] = tokenizer.encode(a , add_special_tokens=a)
self.assertListEqual(a , a)
lowercase__ : int = tokenizer.tokenize(a)
self.assertListEqual(a , a)
lowercase__ : List[str] = tokenizer.convert_ids_to_tokens(a)
self.assertListEqual(a , a)
lowercase__ : Tuple = rust_tokenizer.encode(a , add_special_tokens=a)
self.assertListEqual(a , a)
lowercase__ : List[str] = rust_tokenizer.tokenize(a)
self.assertListEqual(a , a)
lowercase__ : str = rust_tokenizer.convert_ids_to_tokens(a)
self.assertListEqual(a , a)
def snake_case_ ( self):
lowercase__ : Tuple = DebertaVaTokenizer(a)
lowercase__ : str = tokenizer.encode('sequence builders')
lowercase__ : List[str] = tokenizer.encode('multi-sequence build')
lowercase__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(a)
lowercase__ : Dict = tokenizer.build_inputs_with_special_tokens(a , a)
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , a)
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , a , )
@slow
def snake_case_ ( self):
lowercase__ : Optional[Any] = {"input_ids": [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a , model_name='microsoft/deberta-v2-xlarge' , revision='ad6e42c1532ddf3a15c39246b63f5559d558b670' , )
'''simple docstring'''
from jiwer import compute_measures
import datasets
_CITATION = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
_DESCRIPTION = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
_KWARGS_DESCRIPTION = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
"""simple docstring"""
    def _info(self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] , )
    def _compute(self , predictions=None , references=None , concatenate_texts=False ):
        '''simple docstring'''
        if concatenate_texts:
            return compute_measures(references , predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references ):
                measures = compute_measures(reference , prediction )
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
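# Worked example, by hand, matching the docstring above: pair 1 ("this is the
# prediction" vs "this is the reference") has one substitution over 4
# reference words; pair 2 ("there is an other sample" vs "there is another
# one") has two substitutions plus one insertion over 4 reference words.
# Total errors / total reference words = (1 + 3) / (4 + 4) = 0.5.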
from collections import defaultdict
class AssignmentUsingBitmask:
    """simple docstring"""

    def __init__( self , task_performed , total ):
        """simple docstring"""
        self.total_tasks = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1 )] for j in range(2 ** len(task_performed ) )
        ]
        self.task = defaultdict(list )  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed )) - 1

    def count_ways_until( self , mask , task_no ):
        """simple docstring"""
        # if mask == self.final_mask, all persons have been given a task
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # number of ways when we do not assign this task to anyone
        total_ways_util = self.count_ways_until(mask , task_no + 1 )
        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways( self , task_performed ):
        """simple docstring"""
        # store the list of persons for each task
        for i in range(len(task_performed ) ):
            for j in task_performed[i]:
                self.task[j].append(i )
        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0 , 1 )


if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
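# Hand check on a smaller hypothetical instance: with 2 persons where person 0
# can only do task 1 and person 1 can do tasks 1 and 2 (total = 2), the only
# complete assignment is person 0 -> task 1 and person 1 -> task 2, so
# AssignmentUsingBitmask([[1], [1, 2]], 2).count_no_of_ways([[1], [1, 2]])
# should return 1.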
'''simple docstring'''
from functools import lru_cache
def unique_prime_factors(n ):
    '''simple docstring'''
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i )
    if n > 1:
        factors.add(n )
    return factors


@lru_cache
def upf_len(num ):
    '''simple docstring'''
    return len(unique_prime_factors(num ) )


def equality(iterable ):
    '''simple docstring'''
    return len(set(iterable ) ) in (0, 1)


def run(n ):
    '''simple docstring'''
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n )]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x ) for x in group]
        checker.append(n )
        # If all numbers in the list are equal, return the group variable.
        if equality(checker ):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n = 4 ):
    '''simple docstring'''
    results = run(n )
    return results[0] if len(results ) else None
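# Hedged sanity check (this is the classic "n consecutive integers, each with
# n distinct prime factors" search): run(2) should return [14, 15], since
# 14 = 2 * 7 and 15 = 3 * 5 both have exactly two distinct prime factors.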
if __name__ == "__main__":
print(solution())
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""" ,[
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
({"""num_shards""": 10, """max_num_jobs""": 10}, [range(lowercase ,i + 1 ) for i in range(10 )]),
({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 ,4 ), range(4 ,7 ), range(7 ,10 )]),
({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 ,1 ), range(1 ,2 ), range(2 ,3 )]),
] ,)
def test_distribute_shards(kwargs ,expected ) -> int:
    out = _distribute_shards(**kwargs )
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" ,[
({"""foo""": 0}, 10, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] ,)
def test_split_gen_kwargs(gen_kwargs ,max_num_jobs ,expected ) -> str:
    out = _split_gen_kwargs(gen_kwargs ,max_num_jobs )
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" ,[
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] ,)
def test_number_of_shards_in_gen_kwargs(gen_kwargs ,expected ) -> Tuple:
    if expected is RuntimeError:
        with pytest.raises(expected ):
            _number_of_shards_in_gen_kwargs(gen_kwargs )
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs )
        assert out == expected
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_encodec"] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
a : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
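# Hedged usage note: with this layout, importing the package stays cheap --
# the torch-backed names listed in _import_structure are only actually
# imported when an attribute such as EncodecModel is first accessed on the
# lazy module.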
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def get_mobilenet_va_config(model_name ) -> MobileNetVaConfig:
    """simple docstring"""
    config = MobileNetVaConfig(layer_norm_eps=0.001 )
    if "_quant" in model_name:
        raise ValueError('Quantized models are not supported.' )
    matches = re.match(r'^mobilenet_v1_([^_]*)_([^_]*)$' , model_name )
    if matches:
        config.depth_multiplier = float(matches[1] )
        config.image_size = int(matches[2] )
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = 'imagenet-1k-id2label.json'
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ) + 1: v for k, v in id2label.items()}
    id2label[0] = 'background'
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img() -> Image.Image:
    """simple docstring"""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def a ( A__ : int , A__ : int , A__ : List[str] , A__ : Union[str, Any]=False ) -> Tuple:
"""simple docstring"""
_lowercase =get_mobilenet_va_config(A__ )
# Load 🤗 model
_lowercase =MobileNetVaForImageClassification(A__ ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(A__ , A__ , A__ )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
_lowercase =MobileNetVaImageProcessor(
crop_size={'width': config.image_size, 'height': config.image_size} , size={'shortest_edge': config.image_size + 32} , )
_lowercase =image_processor(images=prepare_img() , return_tensors='pt' )
_lowercase =model(**A__ )
_lowercase =outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
_lowercase =torch.tensor([-4.1739, -1.1233, 3.1205] )
elif model_name == "mobilenet_v1_0.75_192":
_lowercase =torch.tensor([-3.9440, -2.3141, -0.3333] )
else:
_lowercase =None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , A__ , atol=1e-4 )
Path(A__ ).mkdir(exist_ok=A__ )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(A__ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(A__ )
if push_to_hub:
print('Pushing to the hub...' )
_lowercase ="google/" + model_name
image_processor.push_to_hub(A__ )
model.push_to_hub(A__ )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='mobilenet_v1_1.0_224',
type=str,
help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.',
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
lowercase_ = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
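# Example invocation (script filename and local paths are hypothetical):
#   python convert_mobilenet_v1.py --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224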
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(R"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(R"([a-z\d])([A-Z])")
_single_underscore_re = re.compile(R"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(R"(_{2,})")
_split_re = R"^\w+(\.\w+)*$"
INVALID_WINDOWS_CHARACTERS_IN_PATH = R"<>:/\|?*"


def camelcase_to_snakecase(name ):
    '''simple docstring'''
    name = _uppercase_uppercase_re.sub(R"\1_\2" , name )
    name = _lowercase_uppercase_re.sub(R"\1_\2" , name )
    return name.lower()


def snakecase_to_camelcase(name ):
    '''simple docstring'''
    name = _single_underscore_re.split(name )
    name = [_multiple_underscores_re.split(n ) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name ) if n != "" )


def filename_prefix_for_name(name ):
    '''simple docstring'''
    if os.path.basename(name ) != name:
        raise ValueError(F"Should be a dataset name, not a path: {name}" )
    return camelcase_to_snakecase(name )


def filename_prefix_for_split(name , split ):
    '''simple docstring'''
    if os.path.basename(name ) != name:
        raise ValueError(F"Should be a dataset name, not a path: {name}" )
    if not re.match(_split_re , split ):
        raise ValueError(F"Split name should match '{_split_re}'' but got '{split}'." )
    return F"{filename_prefix_for_name(name )}-{split}"


def filepattern_for_dataset_split(dataset_name , split , data_dir , filetype_suffix=None ):
    '''simple docstring'''
    prefix = filename_prefix_for_split(dataset_name , split )
    if filetype_suffix:
        prefix += F".{filetype_suffix}"
    filepath = os.path.join(data_dir , prefix )
    return F"{filepath}*"


def filenames_for_dataset_split(path , dataset_name , split , filetype_suffix=None , shard_lengths=None ):
    '''simple docstring'''
    prefix = filename_prefix_for_split(dataset_name , split )
    prefix = os.path.join(path , prefix )
    if shard_lengths:
        num_shards = len(shard_lengths )
        filenames = [F"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards )]
        if filetype_suffix:
            filenames = [filename + F".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += F".{filetype_suffix}"
        return [filename]
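# Hedged examples of the helpers above:
#   camelcase_to_snakecase("SomeDatasetName")   -> "some_dataset_name"
#   snakecase_to_camelcase("some_dataset_name") -> "SomeDatasetName"
#   filename_prefix_for_split("squad", "train") -> "squad-train"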
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features ) -> Optional[int]:
    # Pick a parquet row-group size: use smaller groups for image/audio/binary
    # features so that random access to single rows stays cheap.
    batch_size = np.inf

    def set_batch_size(feature ) -> None:
        nonlocal batch_size
        if isinstance(feature , Image ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
        elif isinstance(feature , Audio ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
        elif isinstance(feature , Value ) and feature.dtype == "binary":
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )

    _visit(features , set_batch_size )
    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader(AbstractDatasetReader ):
    def __init__(
        self ,
        path_or_paths ,
        split = None ,
        features = None ,
        cache_dir = None ,
        keep_in_memory = False ,
        streaming = False ,
        num_proc = None ,
        **kwargs ,
    ) -> None:
        """simple docstring"""
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , hash=hash , **kwargs , )

    def read(self ):
        """simple docstring"""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class ParquetDatasetWriter:
    def __init__(
        self ,
        dataset ,
        path_or_buf ,
        batch_size = None ,
        **parquet_writer_kwargs ,
    ) -> None:
        """simple docstring"""
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features )
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self ) -> int:
        """simple docstring"""
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with open(self.path_or_buf , "wb+" ) as buffer:
                written = self._write(file_obj=buffer , batch_size=batch_size , **self.parquet_writer_kwargs )
        else:
            written = self._write(file_obj=self.path_or_buf , batch_size=batch_size , **self.parquet_writer_kwargs )
        return written

    def _write(self , file_obj , batch_size , **parquet_writer_kwargs ) -> int:
        """simple docstring"""
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf" , None )
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj , schema=schema , **parquet_writer_kwargs )
        for offset in logging.tqdm(
            range(0 , len(self.dataset ) , batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating parquet from Arrow format" , ):
            batch = query_table(
                table=self.dataset._data , key=slice(offset , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
            writer.write_table(batch )
            written += batch.nbytes
        writer.close()
        return written
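# Rough usage sketch (class names as fixed above; paths are hypothetical):
#   ds = ParquetDatasetReader("data/train.parquet", split="train").read()
#   ParquetDatasetWriter(ds, "out/train.parquet", batch_size=1000).write()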
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = B"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
a : str = 45
a : Any = 15_81
a : List[Any] = 15_17
a : Union[str, Any] = 15_70
a : Optional[Any] = 15_84
a : List[str] = 17_93
a : Optional[Any] = 17_95
a : Tuple = 19_16
a : Optional[Any] = 18_64
a : int = 19_05
a : Optional[Any] = 19_19
a : Union[str, Any] = 24_29
a : List[Any] = 22_08
a : Dict = 24_18
a : Optional[int] = 23_23
a : str = 24_07
# @@protoc_insertion_point(module_scope)
"""simple docstring"""
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class _UpperCAmelCase :
def __init__( self :Optional[int] , __UpperCamelCase :List[str] , __UpperCamelCase :Union[str, Any]=13 , __UpperCamelCase :Tuple=7 , __UpperCamelCase :int=True , __UpperCamelCase :List[Any]=True , __UpperCamelCase :Optional[int]=True , __UpperCamelCase :Dict=True , __UpperCamelCase :str=99 , __UpperCamelCase :Optional[int]=64 , __UpperCamelCase :Tuple=32 , __UpperCamelCase :List[Any]=5 , __UpperCamelCase :Any=4 , __UpperCamelCase :Optional[Any]=37 , __UpperCamelCase :List[str]="gelu" , __UpperCamelCase :Any=0.1 , __UpperCamelCase :Dict=0.1 , __UpperCamelCase :List[str]=5_12 , __UpperCamelCase :Tuple=16 , __UpperCamelCase :Union[str, Any]=2 , __UpperCamelCase :List[str]=0.02 , __UpperCamelCase :Dict=3 , __UpperCamelCase :Dict=4 , __UpperCamelCase :Tuple=None , ):
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_input_mask
A = use_token_type_ids
A = use_labels
A = vocab_size
A = hidden_size
A = embedding_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = type_sequence_label_size
A = initializer_range
A = num_labels
A = num_choices
A = scope
def lowerCamelCase ( self :str ):
A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A = None
if self.use_input_mask:
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
if self.use_token_type_ids:
A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A = ids_tensor([self.batch_size] , self.num_choices )
A = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase ( self :Optional[int] ):
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , )
def lowerCamelCase ( self :int , __UpperCamelCase :Optional[int] , __UpperCamelCase :Optional[int] , __UpperCamelCase :Any , __UpperCamelCase :Any , __UpperCamelCase :Any , __UpperCamelCase :Optional[int] , __UpperCamelCase :Tuple ):
A = MegatronBertModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase )
A = model(__UpperCamelCase , token_type_ids=__UpperCamelCase )
A = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase ( self :Any , __UpperCamelCase :Dict , __UpperCamelCase :Tuple , __UpperCamelCase :List[Any] , __UpperCamelCase :str , __UpperCamelCase :Optional[int] , __UpperCamelCase :int , __UpperCamelCase :Optional[Any] ):
A = MegatronBertForMaskedLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase ( self :str , __UpperCamelCase :Optional[Any] , __UpperCamelCase :int , __UpperCamelCase :Optional[Any] , __UpperCamelCase :List[str] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Optional[Any] , __UpperCamelCase :List[Any] ):
A = MegatronBertForCausalLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase ( self :int , __UpperCamelCase :Tuple , __UpperCamelCase :Tuple , __UpperCamelCase :str , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :List[Any] , __UpperCamelCase :str , __UpperCamelCase :str ):
A = MegatronBertForNextSentencePrediction(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Optional[Any] , __UpperCamelCase :str , __UpperCamelCase :int , __UpperCamelCase :int , __UpperCamelCase :int , __UpperCamelCase :List[str] , __UpperCamelCase :Union[str, Any] ):
A = MegatronBertForPreTraining(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase , next_sentence_label=__UpperCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowerCamelCase ( self :List[str] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Optional[int] , __UpperCamelCase :List[str] , __UpperCamelCase :Dict , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :List[Any] , __UpperCamelCase :Optional[Any] ):
A = MegatronBertForQuestionAnswering(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :int , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Optional[Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :int , __UpperCamelCase :List[Any] , __UpperCamelCase :Optional[Any] ):
A = self.num_labels
A = MegatronBertForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Optional[int] , __UpperCamelCase :str , __UpperCamelCase :int , __UpperCamelCase :List[str] , __UpperCamelCase :int , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :List[str] ):
A = self.num_labels
A = MegatronBertForTokenClassification(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase ( self :Dict , __UpperCamelCase :List[str] , __UpperCamelCase :int , __UpperCamelCase :str , __UpperCamelCase :int , __UpperCamelCase :List[Any] , __UpperCamelCase :Optional[int] , __UpperCamelCase :List[str] ):
A = self.num_choices
A = MegatronBertForMultipleChoice(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class _UpperCAmelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
UpperCamelCase = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase = True
# test_resize_embeddings = False
UpperCamelCase = False
def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :int , __UpperCamelCase :Tuple , __UpperCamelCase :int=False ):
A = super()._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
if return_labels:
if model_class in get_values(__UpperCamelCase ):
A = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__UpperCamelCase )
A = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCamelCase )
return inputs_dict
def lowerCamelCase ( self :Any ):
A = MegatronBertModelTester(self )
A = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 )
def lowerCamelCase ( self :int ):
self.config_tester.run_common_tests()
def lowerCamelCase ( self :List[Any] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__UpperCamelCase )
def lowerCamelCase ( self :str ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__UpperCamelCase )
def lowerCamelCase ( self :List[Any] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__UpperCamelCase )
def lowerCamelCase ( self :List[str] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__UpperCamelCase )
def lowerCamelCase ( self :Any ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__UpperCamelCase )
def lowerCamelCase ( self :List[str] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__UpperCamelCase )
def lowerCamelCase ( self :List[Any] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__UpperCamelCase )
def lowerCamelCase ( self :Any ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__UpperCamelCase )
def _long_tensor(tok_lst ):
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
@slow
@unittest.skip("Model is not available." )
def lowerCamelCase ( self :Tuple ):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"] , directory )
        model = MegatronBertModel.from_pretrained(directory )
        model.to(torch_device )
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 1024) )
        self.assertEqual(output.shape , expected_shape )
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3 ):
            for jj in range(3 ):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii , jj , a , b )
                self.assertTrue(math.isclose(a , b , rel_tol=TOLERANCE , abs_tol=TOLERANCE ) , msg=msg )
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path ):
    '''simple docstring'''
    dict_of_neighbours = {}
    with open(path ) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]] )
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]] )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]] )
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]] )
    return dict_of_neighbours
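# The data file is expected to hold one undirected edge per line in the form
# "<node> <node> <distance>", e.g. (hypothetical):
#   a b 20
#   a c 18
#   b c 10
# generate_neighbours above builds the adjacency dict from exactly this layout,
# and generate_first_solution below reads the first character as the start node.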
def generate_first_solution(path , dict_of_neighbours ):
    '''simple docstring'''
    with open(path ) as f:
        start_node = f.read(1 )
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1] ) < int(minim ) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting )
        distance_of_first_solution = distance_of_first_solution + int(minim )
        visiting = best_node
    first_solution.append(end_node )
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1] )
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution , dict_of_neighbours ):
    '''simple docstring'''
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n )
        for kn in solution[1:-1]:
            idx2 = solution.index(kn )
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution )
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k ) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1] )
            _tmp.append(distance )
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp )
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x : x[index_of_last_item_in_the_list] )
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Run tabu search for `iters` iterations with a tabu list of length `size`."""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
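# A minimal sketch of the expected input, inferred from generate_neighbours above:
# each line is "<node_a> <node_b> <distance>". For example, a small 4-city
# instance saved as tabu_data.txt (a hypothetical file name):
#
#   a b 20
#   a c 18
#   a d 22
#   b c 10
#   b d 11
#   c d 23
#
# Example run with the flags defined above:
#   python tabu_search.py -f tabu_data.txt -i 100 -s 5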
| 311
| 0
|
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
"bart": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"bert": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-base-cased-finetuned-mrpc": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"dpr": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"gpt2": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlnet": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm-roberta": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"transfo-xl": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"openai-gpt": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"roberta": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"layoutlm": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"roberta-large-mnli": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"camembert": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"flaubert": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert-base-distilled-squad": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert-visual-feature-encoder": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"ctrl": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"albert": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"t5": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"electra": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"wav2vec2": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    # Most MODEL_CLASSES rows are 4-tuples: (config class, TF model class, PT model class,
    # archive map). Some rows (e.g. "dpr") carry extra classes, so only the first four are taken.
    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type][:4]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from the PyTorch checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save TensorFlow model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        # Most MODEL_CLASSES rows are 4-tuples; here the trailing archive map is assumed
        # to double as the index of known shortcut names for both configs and checkpoints.
        config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type][:4]
        aws_model_maps = aws_config_map

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
os.remove(lowerCamelCase__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
)
parser.add_argument(
"--model_type",
default=None,
type=str,
help=(
f"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
"convert all the models from AWS."
),
)
parser.add_argument(
"--pytorch_checkpoint_path",
default=None,
type=str,
help=(
"Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
"If not given, will download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--config_file",
default=None,
type=str,
help=(
"The config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture. If not given and "
"--pytorch_checkpoint_path is not given or is a shortcut name "
"use the configuration associated to the shortcut name on the AWS"
),
)
parser.add_argument(
"--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
)
parser.add_argument(
"--use_cached_models",
action="store_true",
help="Use cached models if possible instead of updating to latest checkpoint versions.",
)
parser.add_argument(
"--remove_cached_files",
action="store_true",
help="Remove pytorch models after conversion (save memory when converting in batches).",
)
parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
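# Example invocation (hypothetical paths; the flags are the ones defined above):
#   python convert_pytorch_checkpoint_to_tf2.py \
#       --model_type bert \
#       --pytorch_checkpoint_path ./bert-base-cased/pytorch_model.bin \
#       --config_file ./bert-base-cased/config.json \
#       --tf_dump_path ./tf_dump \
#       --compare_with_pt_model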
| 252
|
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-char bit string to little-endian byte order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """Format a non-negative integer as 8 little-endian hex digits."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """Convert a message to a bit string padded to a multiple of 512 chars."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string
def get_block_words(bit_string: bytes):
    """Split the bit string into 512-char blocks and yield each as 16 little-endian words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_32(i: int) -> int:
    """Perform bitwise NOT on a 32-bit integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Add two numbers modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate the bits of a 32-bit integer left by `shift` places."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Return the 32-char little-endian hex MD5 digest of the message."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
# Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0
# Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)     # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)     # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
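# Quick sanity check (a sketch using the function names defined above). The MD5
# of the empty message is the well-known constant:
#   >>> md5_me(b"")
#   b'd41d8cd98f00b204e9800998ecf8427e'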
| 311
| 0
|
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }

        config.update(**kwargs)
        return config

    def test_step_shape(self):
        num_inference_steps = 10

        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3
    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
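# A standalone sketch of the three-step sampling loop exercised above (assumes
# diffusers is installed; the tensor shape is an arbitrary choice):
#   scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
#   scheduler.set_timesteps(10)
#   sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       scaled = scheduler.scale_model_input(sample, t)
#       residual = torch.zeros_like(scaled)  # stand-in for a denoising model's output
#       sample = scheduler.step(residual, t, sample).prev_sample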
| 207
|
'''simple docstring'''
a : List[str] = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 311
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for a ConvNeXtV2 model."""

    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
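# A minimal usage sketch (assumes the class above; the stage choice is arbitrary):
#   config = ConvNextV2Config(out_features=["stage1", "stage4"])
#   print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']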
| 45
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name):
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name):
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
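# Example invocation (hypothetical paths; flags as defined above):
#   python convert_yolos_to_pytorch.py \
#       --yolos_name yolos_s_200_pre \
#       --checkpoint_path ./yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path ./yolos-small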
| 311
| 0
|
"""simple docstring"""
def match_pattern(input_string: str, pattern: str) -> bool:
    """Match the whole input string against a pattern supporting "." and "*"."""
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
__lowerCAmelCase : str ="aab"
__lowerCAmelCase : Dict ="c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F"""{input_string} matches the given pattern {pattern}""")
else:
print(F"""{input_string} does not match with the given pattern {pattern}""")
| 197
|
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPTaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)

    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
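# Sketch of reading the dump back (names assumed from the defaults above):
#   import pickle
#   with open("data/dump.bert-base-uncased.pickle", "rb") as f:
#       sequences = pickle.load(f)  # list of numpy integer arrays of token ids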
| 311
| 0
|
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
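# A standalone sketch of the RoPE-scaling knob exercised above (assumes the
# OpenLlama classes imported at the top of this file; the sizes are arbitrary):
#   config = OpenLlamaConfig(vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4)
#   config.rope_scaling = {"type": "dynamic", "factor": 10.0}
#   model = OpenLlamaModel(config)  # positions beyond max_position_embeddings get interpolated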
| 49
|
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
a : Tuple = ["gpt2"]
a : Dict = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPTaLMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
@require_tf
@require_keras_nlp
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
super().setUp()
UpperCAmelCase : Any = [GPTaTokenizer.from_pretrained(snake_case ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
UpperCAmelCase : Optional[Any] = [TFGPTaTokenizer.from_pretrained(snake_case ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
UpperCAmelCase : Tuple = [
"This is a straightforward English test sentence.",
"This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
"Now we're going to add some Chinese: 一 二 三 一二三",
"And some much more rare Chinese: 齉 堃 齉堃",
"Je vais aussi écrire en français pour tester les accents",
"Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
]
UpperCAmelCase : Optional[Any] = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def A_ ( self ):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
python_outputs = tokenizer([test_inputs] , return_tensors="tf" )
tf_outputs = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
python_outputs_values = python_outputs[key].numpy()
tf_outputs_values = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values , tf.int64 ) == tf_outputs_values ) )
@slow
def test_graph_mode( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
compiled_tokenizer = tf.function(tf_tokenizer )
for test_inputs in self.test_sentences:
test_inputs = tf.constant(test_inputs )
compiled_outputs = compiled_tokenizer(test_inputs )
eager_outputs = tf_tokenizer(test_inputs )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def test_saved_model( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
model = ModelToSave(tokenizer=tf_tokenizer )
test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
out = model.serving(test_inputs )  # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
save_path = Path(tempdir ) / "saved.model"
tf.saved_model.save(model , save_path , signatures={"serving_default": model.serving} )
loaded_model = tf.saved_model.load(save_path )
loaded_output = loaded_model.signatures["serving_default"](test_inputs )["output_0"]
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def test_from_config( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
out = tf_tokenizer(test_inputs )  # Build model with some sample inputs
config = tf_tokenizer.get_config()
model_from_config = TFGPTaTokenizer.from_config(config )
from_config_output = model_from_config(test_inputs )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def test_padding( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
tf_tokenizer.pad_token_id = 1_2_3_1_2_3
for max_length in [3, 5, 1_0_2_4]:
test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
out = tf_tokenizer(test_inputs , max_length=max_length )
out_length = out["input_ids"].numpy().shape[1]
assert out_length == max_length
| 311
| 0
|
from __future__ import annotations
def is_palindrome ( n ):
'''simple docstring'''
n = str(n )
return n == n[::-1]
def solution ( limit : int = 1_000_000 ):
'''simple docstring'''
total = 0
for i in range(1 , limit ):
if is_palindrome(i ) and is_palindrome(bin(i ).split('b' )[1] ):
total += i
return total
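# Worked example: 585 = 0b1001001001 reads the same forwards and backwards in
# both bases, so it contributes to the total (this is Project Euler problem 36).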
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 214
|
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
a : str = "docs/source/en/_toctree.yml"
def clean_model_doc_toc ( model_doc ):
'''simple docstring'''
counts = defaultdict(int )
for doc in model_doc:
counts[doc["local"]] += 1
duplicates = [key for key, value in counts.items() if value > 1]
new_doc = []
for duplicate_key in duplicates:
titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key} )
if len(titles ) > 1:
raise ValueError(
F"{duplicate_key} is present several times in the documentation table of content at "
"`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
"others." )
# Only add this once
new_doc.append({"local": duplicate_key, "title": titles[0]} )
# Add non-duplicate keys
new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1] )
# Sort
return sorted(new_doc , key=lambda s : s["title"].lower() )
def check_model_doc ( overwrite=False ):
'''simple docstring'''
with open(PATH_TO_TOC , encoding="utf-8" ) as f:
content = yaml.safe_load(f.read() )
# Get to the API doc
api_idx = 0
while content[api_idx]["title"] != "API":
api_idx += 1
api_doc = content[api_idx]["sections"]
# Then to the model doc
model_idx = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
model_doc = api_doc[model_idx]["sections"]
modalities_docs = [(idx, section) for idx, section in enumerate(model_doc ) if "sections" in section]
diff = False
for idx, modality_doc in modalities_docs:
old_modality_doc = modality_doc["sections"]
new_modality_doc = clean_model_doc_toc(old_modality_doc )
if old_modality_doc != new_modality_doc:
diff = True
if overwrite:
model_doc[idx]["sections"] = new_modality_doc
if diff:
if overwrite:
api_doc[model_idx]["sections"] = model_doc
content[api_idx]["sections"] = api_doc
with open(PATH_TO_TOC , "w" , encoding="utf-8" ) as f:
f.write(yaml.dump(content , allow_unicode=True ) )
else:
raise ValueError(
"The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
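# Typical invocation (script path assumed):
#   python check_doc_toc.py --fix_and_overwrite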
| 311
| 0
|
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance ( a , b ) -> float:
"""simple docstring"""
return np.linalg.norm(np.array(a) - np.array(b))
def classifier ( train_data , train_target , classes , point , k=5) -> str:
"""simple docstring"""
data = zip(train_data , train_target)
# List of distances of all points from the point to be classified
distances = []
for data_point in data:
distance = euclidean_distance(data_point[0] , point)
distances.append((distance, data_point[1]))
# Choosing 'k' points with the least distances.
votes = [i[1] for i in sorted(distances)[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
result = Counter(votes).most_common(1)[0][0]
return classes[result]
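# The k nearest training points vote with equal weight; Counter.most_common
# breaks ties by insertion order, i.e. by which class was encountered first.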
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 170
|
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module ( module ):
'''simple docstring'''
for param in module.parameters():
param.requires_grad = False
def get_device ( ):
'''simple docstring'''
device = "cuda" if torch.cuda.is_available() else "cpu"
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
device = "mps"
if device == "mps":
print(
"WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
" with generations." )
return device
def show_image ( image ):
'''simple docstring'''
fig = plt.imshow(image )
fig.axes.get_xaxis().set_visible(False )
fig.axes.get_yaxis().set_visible(False )
plt.show()
def get_timestamp ( ):
'''simple docstring'''
current_time = datetime.now()
timestamp = current_time.strftime("%H:%M:%S" )
return timestamp
| 311
| 0
|
def excel_title_to_column ( column_title : str ):
"""simple docstring"""
assert column_title.isupper()
answer = 0
index = len(column_title ) - 1
power = 0
while index >= 0:
value = (ord(column_title[index] ) - 64) * pow(26 ,power )
answer += value
power += 1
index -= 1
return answer
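# Worked example: "AB" -> (ord("B") - 64) * 26**0 + (ord("A") - 64) * 26**1
#                      = 2 + 26 = 28, matching spreadsheet column numbering.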
if __name__ == "__main__":
from doctest import testmod
testmod()
| 312
|
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
_torch_available = True
except ImportError:
_torch_available = False
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
)
default_cache_path = os.path.join(torch_cache_home, 'transformers')
CLOUDFRONT_DISTRIB_PREFIX = 'https://cdn.huggingface.co'
S3_BUCKET_PREFIX = 'https://s3.amazonaws.com/models.huggingface.co/bert'
PATH = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
CONFIG = os.path.join(PATH, 'config.yaml')
ATTRIBUTES = os.path.join(PATH, 'attributes.txt')
OBJECTS = os.path.join(PATH, 'objects.txt')
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = 'pytorch_model.bin'
CONFIG_NAME = 'config.yaml'
def load_labels ( objs=OBJECTS ,attrs=ATTRIBUTES ):
"""simple docstring"""
vg_classes = []
with open(objs ) as f:
for object in f.readlines():
vg_classes.append(object.split("," )[0].lower().strip() )
vg_attrs = []
with open(attrs ) as f:
for object in f.readlines():
vg_attrs.append(object.split("," )[0].lower().strip() )
return vg_classes, vg_attrs
def load_checkpoint ( ckp_path ):
"""simple docstring"""
r = OrderedDict()
with open(ckp_path ,"rb" ) as f:
ckp = pkl.load(f )["model"]
for k in copy.deepcopy(list(ckp.keys() ) ):
v = ckp.pop(k )
if isinstance(v ,np.ndarray ):
v = torch.tensor(v )
else:
assert isinstance(v ,torch.Tensor ), type(v )
r[k] = v
return r
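# The pickled detectron-style checkpoint stores numpy arrays; converting every
# entry to a torch tensor up front lets the result be consumed like an ordinary
# PyTorch state dict.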
class Config :
"""simple docstring"""
_pointer = {}
def __init__( self , dictionary : dict , name : str = "root" , level=0 ):
self._name = name
self._level = level
d = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
k = copy.deepcopy(k )
v = copy.deepcopy(v )
if isinstance(v , dict ):
v = Config(v , name=k , level=level + 1 )
d[k] = v
setattr(self , k , v )
self._pointer = d
def __repr__( self : Optional[Any] ):
return str(list((self._pointer.keys()) ) )
def __setattr__( self : Any , UpperCAmelCase : Any , UpperCAmelCase : Any ):
A_ = val
A_ = val
A_ = key.split("." )
A_ = len(UpperCAmelCase ) - 1
A_ = self._pointer
if len(UpperCAmelCase ) > 1:
for i, l in enumerate(UpperCAmelCase ):
if hasattr(self , UpperCAmelCase ) and isinstance(getattr(self , UpperCAmelCase ) , UpperCAmelCase ):
setattr(getattr(self , UpperCAmelCase ) , ".".join(levels[i:] ) , UpperCAmelCase )
if l == last_level:
A_ = val
else:
A_ = pointer[l]
def to_dict ( self ):
return self._pointer
def dump_yaml ( self , data , file_name ):
with open(f'''{file_name}''' , "w" ) as stream:
dump(data , stream )
def dump_json ( self , data , file_name ):
with open(f'''{file_name}''' , "w" ) as stream:
json.dump(data , stream )
@staticmethod
def load_yaml ( config_path ):
with open(config_path ) as stream:
data = load(stream , Loader=Loader )
return data
def __str__( self : str ):
A_ = " "
if self._name != "root":
A_ = f'''{t * (self._level-1)}{self._name}:\n'''
else:
A_ = ""
A_ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(UpperCAmelCase , UpperCAmelCase ):
r += f'''{t * (self._level)}{v}\n'''
self._level += 1
else:
r += f'''{t * (self._level)}{k}: {v} ({type(UpperCAmelCase ).__name__})\n'''
A_ = level
return r[:-1]
@classmethod
def from_pretrained ( cls , pretrained_model_name_or_path , **kwargs ):
config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
return cls(config_dict )
@classmethod
def get_config_dict ( cls , pretrained_model_name_or_path , **kwargs ):
cache_dir = kwargs.pop("cache_dir" , None )
force_download = kwargs.pop("force_download" , False )
resume_download = kwargs.pop("resume_download" , False )
proxies = kwargs.pop("proxies" , None )
local_files_only = kwargs.pop("local_files_only" , False )
if os.path.isdir(pretrained_model_name_or_path ):
config_file = os.path.join(pretrained_model_name_or_path , CONFIG_NAME )
elif os.path.isfile(pretrained_model_name_or_path ) or is_remote_url(pretrained_model_name_or_path ):
config_file = pretrained_model_name_or_path
else:
config_file = hf_bucket_url(pretrained_model_name_or_path , filename=CONFIG_NAME , use_cdn=False )
try:
# Load from URL or cache if already cached
resolved_config_file = cached_path(
config_file , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
config_dict = Config.load_yaml(resolved_config_file )
except EnvironmentError:
msg = "Can't load config for"
raise EnvironmentError(msg )
if resolved_config_file == config_file:
print("loading configuration file from path" )
else:
print("loading configuration file cache" )
return Config.load_yaml(resolved_config_file ), kwargs
def compare ( in_tensor ):
"""simple docstring"""
out_tensor = torch.load("dump.pt" ,map_location=in_tensor.device )
n1 = in_tensor.numpy()
n2 = out_tensor.numpy()[0]
print(n1.shape ,n1[0, 0, :5] )
print(n2.shape ,n2[0, 0, :5] )
assert np.allclose(n1 ,n2 ,rtol=0.01 ,atol=0.1 ), (
f'''{sum([1 for x in np.isclose(n1 ,n2 ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(n2.flatten() )*100:.4f} %'''
" element-wise mismatch"
)
raise Exception("tensors are all good" )
# Hugging face functions below
def is_remote_url ( url_or_filename ):
"""simple docstring"""
parsed = urlparse(url_or_filename )
return parsed.scheme in ("http", "https")
def hf_bucket_url ( model_id : str ,filename : str ,use_cdn=True ):
"""simple docstring"""
endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
legacy_format = "/" not in model_id
if legacy_format:
return f'''{endpoint}/{model_id}-{filename}'''
else:
return f'''{endpoint}/{model_id}/{filename}'''
def http_get ( url ,temp_file ,proxies=None ,resume_size=0 ,user_agent=None ,):
"""simple docstring"""
ua = "python/{}".format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(user_agent ,dict ):
ua += "; " + "; ".join("{}/{}".format(k ,v ) for k, v in user_agent.items() )
elif isinstance(user_agent ,str ):
ua += "; " + user_agent
headers = {"user-agent": ua}
if resume_size > 0:
headers["Range"] = "bytes=%d-" % (resume_size,)
response = requests.get(url ,stream=True ,proxies=proxies ,headers=headers )
if response.status_code == 416: # Range not satisfiable
return
content_length = response.headers.get("Content-Length" )
total = resume_size + int(content_length ) if content_length is not None else None
progress = tqdm(
unit="B" ,unit_scale=True ,total=total ,initial=resume_size ,desc="Downloading" ,)
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk ) )
temp_file.write(chunk )
progress.close()
def get_from_cache ( url ,cache_dir=None ,force_download=False ,proxies=None ,etag_timeout=10 ,resume_download=False ,user_agent=None ,local_files_only=False ,):
"""simple docstring"""
if cache_dir is None:
cache_dir = TRANSFORMERS_CACHE
if isinstance(cache_dir ,Path ):
cache_dir = str(cache_dir )
os.makedirs(cache_dir ,exist_ok=True )
etag = None
if not local_files_only:
try:
response = requests.head(url ,allow_redirects=True ,proxies=proxies ,timeout=etag_timeout )
if response.status_code == 200:
etag = response.headers.get("ETag" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
filename = url_to_filename(url ,etag )
# get cache path to put the file
cache_path = os.path.join(cache_dir ,filename )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(cache_path ):
return cache_path
else:
matching_files = [
file
for file in fnmatch.filter(os.listdir(cache_dir ) ,filename + ".*" )
if not file.endswith(".json" ) and not file.endswith(".lock" )
]
if len(matching_files ) > 0:
return os.path.join(cache_dir ,matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"Cannot find the requested files in the cached path and outgoing traffic has been"
" disabled. To enable model look-ups and downloads online, set 'local_files_only'"
" to False." )
return None
# From now on, etag is not None.
if os.path.exists(cache_path ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
lock_path = cache_path + ".lock"
with FileLock(lock_path ):
# If the download just completed while the lock was activated.
if os.path.exists(cache_path ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
incomplete_path = cache_path + ".incomplete"
@contextmanager
def _resumable_file_manager():
with open(incomplete_path ,"a+b" ) as f:
yield f
temp_file_manager = _resumable_file_manager
if os.path.exists(incomplete_path ):
resume_size = os.stat(incomplete_path ).st_size
else:
resume_size = 0
else:
temp_file_manager = partial(tempfile.NamedTemporaryFile ,dir=cache_dir ,delete=False )
resume_size = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"%s not found in cache or force_download set to True, downloading to %s" ,url ,temp_file.name ,)
http_get(
url ,temp_file ,proxies=proxies ,resume_size=resume_size ,user_agent=user_agent ,)
os.replace(temp_file.name ,cache_path )
meta = {"url": url, "etag": etag}
meta_path = cache_path + ".json"
with open(meta_path ,"w" ) as meta_file:
json.dump(meta ,meta_file )
return cache_path
def url_to_filename ( url ,etag=None ):
"""simple docstring"""
url_bytes = url.encode("utf-8" )
url_hash = sha256(url_bytes )
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode("utf-8" )
etag_hash = sha256(etag_bytes )
filename += "." + etag_hash.hexdigest()
if url.endswith(".h5" ):
filename += ".h5"
return filename
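# Cache entries are content-addressed: sha256(url) plus, when available,
# sha256(etag). A changed ETag (new remote content) therefore produces a fresh
# cache file instead of silently overwriting the old one.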
def cached_path ( url_or_filename ,cache_dir=None ,force_download=False ,proxies=None ,resume_download=False ,user_agent=None ,extract_compressed_file=False ,force_extract=False ,local_files_only=False ,):
"""simple docstring"""
if cache_dir is None:
cache_dir = TRANSFORMERS_CACHE
if isinstance(url_or_filename ,Path ):
url_or_filename = str(url_or_filename )
if isinstance(cache_dir ,Path ):
cache_dir = str(cache_dir )
if is_remote_url(url_or_filename ):
# URL, so get it from the cache (downloading if necessary)
output_path = get_from_cache(
url_or_filename ,cache_dir=cache_dir ,force_download=force_download ,proxies=proxies ,resume_download=resume_download ,user_agent=user_agent ,local_files_only=local_files_only ,)
elif os.path.exists(url_or_filename ):
# File, and it exists.
output_path = url_or_filename
elif urlparse(url_or_filename ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename ) )
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename ) )
if extract_compressed_file:
if not is_zipfile(output_path ) and not tarfile.is_tarfile(output_path ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
output_dir , output_file = os.path.split(output_path )
output_extract_dir_name = output_file.replace("." ,"-" ) + "-extracted"
output_path_extracted = os.path.join(output_dir ,output_extract_dir_name )
if os.path.isdir(output_path_extracted ) and os.listdir(output_path_extracted ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
lock_path = output_path + ".lock"
with FileLock(lock_path ):
shutil.rmtree(output_path_extracted ,ignore_errors=True )
os.makedirs(output_path_extracted )
if is_zipfile(output_path ):
with ZipFile(output_path ,"r" ) as zip_file:
zip_file.extractall(output_path_extracted )
zip_file.close()
elif tarfile.is_tarfile(output_path ):
tar_file = tarfile.open(output_path )
tar_file.extractall(output_path_extracted )
tar_file.close()
else:
raise EnvironmentError("Archive format of {} could not be identified".format(output_path ) )
return output_path_extracted
return output_path
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Any="," ):
"""simple docstring"""
assert isinstance(__UpperCamelCase ,__UpperCamelCase )
if os.path.isfile(__UpperCamelCase ):
with open(__UpperCamelCase ) as f:
A_ = eval(f.read() )
else:
A_ = requests.get(__UpperCamelCase )
try:
A_ = requests.json()
except Exception:
A_ = req.content.decode()
assert data is not None, "could not connect"
try:
A_ = eval(__UpperCamelCase )
except Exception:
A_ = data.split("\n" )
req.close()
return data
def get_image_from_url ( url ):
"""simple docstring"""
response = requests.get(url )
img = np.array(Image.open(BytesIO(response.content ) ) )
return img
def load_frcnn_pkl_from_url ( url ):
"""simple docstring"""
fn = url.split("/" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(url )
with open(fn ,"rb" ) as stream:
weights = pkl.load(stream )
model = weights.pop("model" )
new = {}
for k, v in model.items():
new[k] = torch.from_numpy(v )
if "running_var" in k:
zero = torch.tensor([0] )
k2 = k.replace("running_var" ,"num_batches_tracked" )
new[k2] = zero
return new
def get_demo_path ( ):
"""simple docstring"""
print(f'''{os.path.abspath(os.path.join(PATH ,os.pardir ) )}/demo.ipynb''' )
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Optional[int]="RGB" ):
"""simple docstring"""
assert isinstance(__UpperCamelCase ,__UpperCamelCase )
if os.path.isfile(__UpperCamelCase ):
A_ = cva.imread(__UpperCamelCase )
else:
A_ = get_image_from_url(__UpperCamelCase )
assert img is not None, f'''could not connect to: {im}'''
A_ = cva.cvtColor(__UpperCamelCase ,cva.COLOR_BGR2RGB )
if input_format == "RGB":
A_ = img[:, :, ::-1]
return img
def chunk ( images ,batch=1 ):
"""simple docstring"""
return (images[i : i + batch] for i in range(0 ,len(images ) ,batch ))
| 312
| 1
|
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch ( tf_checkpoint_path ,config_file ,pytorch_dump_path ):
"""simple docstring"""
config = LxmertConfig.from_json_file(config_file )
print(f'''Building PyTorch model from configuration: {config}''' )
model = LxmertForPreTraining(config )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(model ,config ,tf_checkpoint_path )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() ,pytorch_dump_path )
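# Example invocation (all paths below are placeholders):
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./model.ckpt --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin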
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 312
|
from __future__ import annotations
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
for i in range(1 ,len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 ,len(__UpperCamelCase ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 ,len(__UpperCamelCase ) ):
for j in range(1 ,len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] ,matrix[i][j - 1] )
return matrix[-1][-1]
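# Worked example: [[1, 3, 1], [1, 5, 1], [4, 2, 1]] has minimal path sum
# 1 -> 3 -> 1 -> 1 -> 1 = 7.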
if __name__ == "__main__":
import doctest
doctest.testmod()
| 312
| 1
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class WhisperFeatureExtractor ( SequenceFeatureExtractor ):
"""simple docstring"""
model_input_names = ['input_features']
def __init__( self , feature_size=80 , sampling_rate=16000 , hop_length=160 , chunk_length=30 , n_fft=400 , padding_value=0.0 , return_attention_mask=False , **kwargs , ):
super().__init__(
feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , return_attention_mask=return_attention_mask , **kwargs , )
self.n_fft = n_fft
self.hop_length = hop_length
self.chunk_length = chunk_length
self.n_samples = chunk_length * sampling_rate
self.nb_max_frames = self.n_samples // hop_length
self.sampling_rate = sampling_rate
self.mel_filters = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=sampling_rate , norm="slaney" , mel_scale="slaney" , )
def _np_extract_fbank_features ( self , waveform : np.array ):
log_spec = spectrogram(
waveform , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , )
log_spec = log_spec[:, :-1]
log_spec = np.maximum(log_spec , log_spec.max() - 8.0 )
log_spec = (log_spec + 4.0) / 4.0
return log_spec
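# The clamp to (max - 8.0) and the (log_spec + 4.0) / 4.0 rescale follow the
# Whisper reference preprocessing, squashing log-mel magnitudes into roughly
# the [-1, 1] range expected by the model.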
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def zero_mean_unit_var_norm ( input_values : List[np.ndarray] , attention_mask : List[np.ndarray] , padding_value : float = 0.0 ):
if attention_mask is not None:
attention_mask = np.array(attention_mask , np.int32 )
normed_input_values = []
for vector, length in zip(input_values , attention_mask.sum(-1 ) ):
normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
normed_slice[length:] = padding_value
normed_input_values.append(normed_slice )
else:
normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
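# Only the unpadded prefix of each vector (its length taken from the attention
# mask) feeds the mean/variance estimate, so padding never skews the statistics.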
def __call__( self , raw_speech : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , truncation : bool = True , pad_to_multiple_of : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , return_attention_mask : Optional[bool] = None , padding : Optional[str] = "max_length" , max_length : Optional[int] = None , sampling_rate : Optional[int] = None , do_normalize : Optional[bool] = None , **kwargs , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
is_batched = is_batched_numpy or (
isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
raw_speech = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
elif not is_batched and not isinstance(raw_speech , np.ndarray ):
raw_speech = np.asarray(raw_speech , dtype=np.float32 )
elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
raw_speech = raw_speech.astype(np.float32 )
# always return batch
if not is_batched:
raw_speech = [np.asarray([raw_speech] ).T]
batched_speech = BatchFeature({"input_features": raw_speech} )
# convert into correct format for padding
padded_inputs = self.pad(
batched_speech , padding=padding , max_length=max_length if max_length else self.n_samples , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
padded_inputs["input_features"] = np.stack(padded_inputs["input_features"] , axis=0 )
# make sure list is in array format
input_features = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 )
input_features = [self._np_extract_fbank_features(waveform ) for waveform in input_features[0]]
if isinstance(input_features[0] , List ):
padded_inputs["input_features"] = [np.asarray(feature , dtype=np.float32 ) for feature in input_features]
else:
padded_inputs["input_features"] = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]
if return_tensors is not None:
padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
return padded_inputs
def to_dict ( self ):
output = copy.deepcopy(self.__dict__ )
output["feature_extractor_type"] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
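# mel_filters is a large derived matrix; dropping it from the serialized dict
# keeps the saved feature-extractor config small, and it is rebuilt from the
# scalar hyperparameters when the extractor is instantiated again.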
| 312
|
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset ( Dataset ):
"""simple docstring"""
def __init__( self , length : int = 101 ):
self.length = length
def __len__( self : int ):
return self.length
def __getitem__( self : Optional[int] , UpperCAmelCase : Optional[int] ):
return i
class DummyDataCollator :
"""simple docstring"""
def __call__( self , features ):
return {"input_ids": torch.tensor(features ), "labels": torch.tensor(features )}
class DummyModel ( nn.Module ):
"""simple docstring"""
def __init__( self : int ):
super().__init__()
# Add some (unused) params otherwise DDP will complain.
self.fc = nn.Linear(120 , 80 )
def __A ( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : Tuple=None ):
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device ), input_ids
else:
return input_ids
class TestTrainerDistributedNeuronCore ( TestCasePlus ):
"""simple docstring"""
@require_torch_neuroncore
def test_trainer( self ):
distributed_args = f'''--nproc_per_node=2
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
'''.split()
output_dir = self.get_auto_remove_tmp_dir()
args = f'''--output_dir {output_dir}'''.split()
cmd = ["torchrun"] + distributed_args + args
execute_subprocess_async(cmd , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed ( TestCasePlus ):
"""simple docstring"""
@require_torch_multi_gpu
def test_trainer( self ):
distributed_args = f'''--nproc_per_node={torch.cuda.device_count()}
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
'''.split()
output_dir = self.get_auto_remove_tmp_dir()
args = f'''--output_dir {output_dir}'''.split()
cmd = ["torchrun"] + distributed_args + args
execute_subprocess_async(cmd , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
parser = HfArgumentParser((TrainingArguments,))
training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
F"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
dataset = DummyDataset(dataset_length)
def compute_metrics ( p : EvalPrediction ):
"""simple docstring"""
sequential = list(range(len(dataset ) ) )
success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
"Predictions and/or labels do not match expected results:\n - predictions: "
f'''{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}''' )
return {"success": success}
trainer = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
metrics = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
p = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
trainer.args.eval_accumulation_steps = 2
metrics = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
p = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
trainer.args.eval_accumulation_steps = None
| 312
| 1
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester :
"""simple docstring"""
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , scope=None , range_bbox=1000 , ):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.scope = scope
self.range_bbox = range_bbox
def prepare_config_and_inputs ( self ):
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
bbox = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
t = bbox[i, j, 3]
bbox[i, j, 3] = bbox[i, j, 1]
bbox[i, j, 1] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
t = bbox[i, j, 2]
bbox[i, j, 2] = bbox[i, j, 0]
bbox[i, j, 0] = t
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
sequence_labels = None
token_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
config = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def get_config ( self ):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def create_and_check_model ( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
model = LiltModel(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids )
result = model(input_ids , bbox=bbox , token_type_ids=token_type_ids )
result = model(input_ids , bbox=bbox )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def create_and_check_for_token_classification ( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
config.num_labels = self.num_labels
model = LiltForTokenClassification(config=config )
model.to(torch_device )
model.eval()
result = model(
input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def create_and_check_for_question_answering ( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
model = LiltForQuestionAnswering(config=config )
model.to(torch_device )
model.eval()
result = model(
input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def prepare_config_and_inputs_for_common ( self ):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
bbox,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class LiltModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
all_model_classes = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
fx_compatible = False
test_pruning = False
def is_pipeline_test_to_skip ( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
return True
def setUp ( self ):
self.model_tester = LiltModelTester(self )
self.config_tester = ConfigTester(self , config_class=LiltConfig , hidden_size=37 )
def test_config ( self ):
self.config_tester.run_common_tests()
def test_model ( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_model_various_embeddings ( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*config_and_inputs )
def test_for_token_classification ( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
def test_for_question_answering ( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
@slow
def test_model_from_pretrained ( self ):
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = LiltModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@require_torch
@slow
class _a ( unittest.TestCase ):
"""simple docstring"""
def test_inference_no_head ( self ):
model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(torch_device )
input_ids = torch.tensor([[1, 2]] , device=torch_device )
bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=torch_device )
# forward pass
with torch.no_grad():
outputs = model(input_ids=input_ids , bbox=bbox )
expected_shape = torch.Size([1, 2, 768] )
expected_slice = torch.tensor(
[[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=torch_device , )
self.assertTrue(outputs.last_hidden_state.shape , expected_shape )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , expected_slice , atol=1E-3 ) )
| 312
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__a :Any = logging.get_logger(__name__)
def get_config ( model_name ):
"""simple docstring"""
repo_id = "huggingface/label-files"
filename = "imagenet-1k-id2label.json"
id2label = json.load(open(hf_hub_download(repo_id ,filename ,repo_type="dataset" ) ,"r" ) )
id2label = {int(k ): v for k, v in id2label.items()}
label2id = {v: k for k, v in id2label.items()}
conv_layer = "std_conv" if "bit" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
config = BitConfig(
conv_layer=conv_layer ,num_labels=1000 ,id2label=id2label ,label2id=label2id ,)
return config
def rename_key ( name ):
"""simple docstring"""
if "stem.conv" in name:
name = name.replace("stem.conv" ,"bit.embedder.convolution" )
if "blocks" in name:
name = name.replace("blocks" ,"layers" )
if "head.fc" in name:
name = name.replace("head.fc" ,"classifier.1" )
if name.startswith("norm" ):
name = "bit." + name
if "bit" not in name and "classifier" not in name:
name = "bit.encoder." + name
return name
def prepare_img ( ):
"""simple docstring"""
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
im = Image.open(requests.get(url ,stream=True ).raw )
return im
@torch.no_grad()
def convert_bit_checkpoint ( model_name ,pytorch_dump_folder_path ,push_to_hub=False ):
"""simple docstring"""
config = get_config(model_name )
# load original model from timm
timm_model = create_model(model_name ,pretrained=True )
timm_model.eval()
# load state_dict of original model
state_dict = timm_model.state_dict()
for key in state_dict.copy().keys():
val = state_dict.pop(key )
state_dict[rename_key(key )] = val.squeeze() if "head" in key else val
# load HuggingFace model
model = BitForImageClassification(config )
model.eval()
model.load_state_dict(state_dict )
# create image processor
transform = create_transform(**resolve_data_config({} ,model=timm_model ) )
timm_transforms = transform.transforms
pillow_resamplings = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
processor = BitImageProcessor(
do_resize=True ,size={"shortest_edge": timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=True ,crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} ,do_normalize=True ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,)
image = prepare_img()
timm_pixel_values = transform(image ).unsqueeze(0 )
pixel_values = processor(image ,return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(timm_pixel_values ,pixel_values )
# verify logits
with torch.no_grad():
outputs = model(pixel_values )
logits = outputs.logits
print("Logits:" ,logits[0, :3] )
print("Predicted class:" ,model.config.id2label[logits.argmax(-1 ).item()] )
timm_logits = timm_model(pixel_values )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(timm_logits ,outputs.logits ,atol=1E-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(pytorch_dump_folder_path )
processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print(f'''Pushing model {model_name} and processor to the hub''' )
model.push_to_hub(f'''ybelkada/{model_name}''' )
processor.push_to_hub(f'''ybelkada/{model_name}''' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 312
| 1
|
def perfect_cube ( n : int ) -> bool:
"""simple docstring"""
val = round(n ** (1 / 3) )
return (val * val * val) == n
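# round() guards against floating-point error: 343 ** (1 / 3) evaluates to
# 6.999999999999999, so a naive exact comparison would wrongly reject 343.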
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 312
|
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / 'model_card_template.md'
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def http_user_agent ( user_agent : Union[Dict, str, None] = None ):
"""simple docstring"""
ua = f'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'''
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f'''; torch/{_torch_version}'''
if is_flax_available():
ua += f'''; jax/{_jax_version}'''
ua += f'''; flax/{_flax_version}'''
if is_onnx_available():
ua += f'''; onnxruntime/{_onnxruntime_version}'''
# CI will set this value to True
if os.environ.get("DIFFUSERS_IS_CI" ,"" ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(user_agent ,dict ):
ua += "; " + "; ".join(f'''{k}/{v}''' for k, v in user_agent.items() )
elif isinstance(user_agent ,str ):
ua += "; " + user_agent
return ua
def get_full_repo_name ( model_id : str ,organization : Optional[str] = None ,token : Optional[str] = None ):
"""simple docstring"""
if token is None:
token = HfFolder.get_token()
if organization is None:
username = whoami(token )["name"]
return f'''{username}/{model_id}'''
else:
return f'''{organization}/{model_id}'''
def create_model_card ( args ,model_name ):
"""simple docstring"""
if not is_jinja_available():
raise ValueError(
"Modelcard rendering is based on Jinja templates."
" Please make sure to have `jinja` installed before using `create_model_card`."
" To install it, please run `pip install Jinja2`." )
if hasattr(args ,"local_rank" ) and args.local_rank not in [-1, 0]:
return
hub_token = args.hub_token if hasattr(args ,"hub_token" ) else None
repo_name = get_full_repo_name(model_name ,token=hub_token )
model_card = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language="en" ,license="apache-2.0" ,library_name="diffusers" ,tags=[] ,datasets=args.dataset_name ,metrics=[] ,) ,template_path=MODEL_CARD_TEMPLATE_PATH ,model_name=model_name ,repo_name=repo_name ,dataset_name=args.dataset_name if hasattr(args ,"dataset_name" ) else None ,learning_rate=args.learning_rate ,train_batch_size=args.train_batch_size ,eval_batch_size=args.eval_batch_size ,gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(args ,"gradient_accumulation_steps" ) else None
) ,adam_beta1=args.adam_beta1 if hasattr(args ,"adam_beta1" ) else None ,adam_beta2=args.adam_beta2 if hasattr(args ,"adam_beta2" ) else None ,adam_weight_decay=args.adam_weight_decay if hasattr(args ,"adam_weight_decay" ) else None ,adam_epsilon=args.adam_epsilon if hasattr(args ,"adam_epsilon" ) else None ,lr_scheduler=args.lr_scheduler if hasattr(args ,"lr_scheduler" ) else None ,lr_warmup_steps=args.lr_warmup_steps if hasattr(args ,"lr_warmup_steps" ) else None ,ema_inv_gamma=args.ema_inv_gamma if hasattr(args ,"ema_inv_gamma" ) else None ,ema_power=args.ema_power if hasattr(args ,"ema_power" ) else None ,ema_max_decay=args.ema_max_decay if hasattr(args ,"ema_max_decay" ) else None ,mixed_precision=args.mixed_precision ,)
card_path = os.path.join(args.output_dir ,"README.md" )
model_card.save(card_path )
def extract_commit_hash ( resolved_file : Optional[str] ,commit_hash : Optional[str] = None ):
"""simple docstring"""
if resolved_file is None or commit_hash is not None:
return commit_hash
resolved_file = str(Path(resolved_file ).as_posix() )
search = re.search(R"snapshots/([^/]+)/" ,resolved_file )
if search is None:
return None
commit_hash = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(commit_hash ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
old_diffusers_cache = os.path.join(hf_cache_home, 'diffusers')
def move_cache ( old_cache_dir : Optional[str] = None ,new_cache_dir : Optional[str] = None ):
"""simple docstring"""
if new_cache_dir is None:
new_cache_dir = DIFFUSERS_CACHE
if old_cache_dir is None:
old_cache_dir = old_diffusers_cache
old_cache_dir = Path(old_cache_dir ).expanduser()
new_cache_dir = Path(new_cache_dir ).expanduser()
for old_blob_path in old_cache_dir.glob("**/blobs/*" ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir )
new_blob_path.parent.mkdir(parents=True ,exist_ok=True )
os.replace(old_blob_path ,new_blob_path )
try:
os.symlink(new_blob_path ,old_blob_path )
except OSError:
logger.warning(
"Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
cache_version = 0
else:
with open(cache_version_file) as f:
try:
cache_version = int(f.read())
except ValueError:
cache_version = 0
if cache_version < 1:
old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
            trace = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
F"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
F"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
'the directory exists and can be written to.'
)
def _add_variant ( weights_name : str ,variant : Optional[str] = None ):
    """simple docstring"""
    if variant is not None:
        splits = weights_name.split("." )
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits )
    return weights_name
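# Worked example of the variant-insertion rule above: the variant tag is
# spliced in just before the file extension (the file name is illustrative):
assert _add_variant("diffusion_pytorch_model.bin" ,"fp16" ) == "diffusion_pytorch_model.fp16.bin"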
def __snake_case ( pretrained_model_name_or_path ,*,
    weights_name ,subfolder ,cache_dir ,force_download ,proxies ,resume_download ,local_files_only ,use_auth_token ,user_agent ,revision ,commit_hash=None ,):
    """simple docstring"""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path )
    if os.path.isfile(pretrained_model_name_or_path ):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path ):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path ,weights_name ) ):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path ,weights_name )
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path ,subfolder ,weights_name ) ):
            model_file = os.path.join(pretrained_model_name_or_path ,subfolder ,weights_name )
            return model_file
        else:
            raise EnvironmentError(
                f'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__ ).base_version ) >= version.parse("0.20.0" )
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path ,filename=_add_variant(weights_name ,revision ) ,cache_dir=cache_dir ,force_download=force_download ,proxies=proxies ,resume_download=resume_download ,local_files_only=local_files_only ,use_auth_token=use_auth_token ,user_agent=user_agent ,subfolder=subfolder ,revision=revision or commit_hash ,)
                warnings.warn(
                    f'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' ,FutureWarning ,)
                return model_file
            except: # noqa: E722
                warnings.warn(
                    f'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name ,revision )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(weights_name ,revision )}\' so that the correct variant file can be added.''' ,FutureWarning ,)
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path ,filename=weights_name ,cache_dir=cache_dir ,force_download=force_download ,proxies=proxies ,resume_download=resume_download ,local_files_only=local_files_only ,use_auth_token=use_auth_token ,user_agent=user_agent ,subfolder=subfolder ,revision=revision or commit_hash ,)
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
"listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
"token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
"login`." )
except RevisionNotFoundError:
raise EnvironmentError(
f'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
"this model name. Check the model page at "
f'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' )
except EntryNotFoundError:
raise EnvironmentError(
f'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' )
except HTTPError as err:
raise EnvironmentError(
f'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' )
except ValueError:
raise EnvironmentError(
f'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
f''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
f''' directory containing a file named {weights_name} or'''
" \nCheckout your internet connection or see how to run the library in"
" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." )
except EnvironmentError:
raise EnvironmentError(
f'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
"'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
f'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
f'''containing a file named {weights_name}''' )
| 312
| 1
|
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"files" ,[
["full:README.md", "dataset_infos.json"],
["empty:README.md", "dataset_infos.json"],
["dataset_infos.json"],
["full:README.md"],
] ,)
def __snake_case ( files ,tmp_path_factory ):
    """simple docstring"""
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir" )
if "full:README.md" in files:
with open(dataset_infos_dir / "README.md" ,"w" ) as f:
f.write("---\ndataset_info:\n dataset_size: 42\n---" )
if "empty:README.md" in files:
with open(dataset_infos_dir / "README.md" ,"w" ) as f:
f.write("" )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / "dataset_infos.json" ,"w" ) as f:
f.write("{\"default\": {\"dataset_size\": 42}}" )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"dataset_info" ,[
DatasetInfo(),
DatasetInfo(
description="foo" ,features=Features({"a": Value("int32" )} ) ,builder_name="builder" ,config_name="config" ,version="1.0.0" ,splits=[{"name": "train"}] ,download_size=42 ,),
] ,)
def __snake_case ( tmp_path ,dataset_info : DatasetInfo ):
    """simple docstring"""
    tmp_path = str(tmp_path )
    dataset_info.write_to_directory(tmp_path )
    reloaded = DatasetInfo.from_directory(tmp_path )
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path ,"dataset_info.json" ) )
def __snake_case ( ):
"""simple docstring"""
    dataset_info = DatasetInfo(
        description="foo" ,citation="bar" ,homepage="https://foo.bar" ,license="CC0" ,features=Features({"a": Value("int32" )} ) ,post_processed={} ,supervised_keys=() ,task_templates=[] ,builder_name="builder" ,config_name="config" ,version="1.0.0" ,splits=[{"name": "train", "num_examples": 42}] ,download_checksums={} ,download_size=1337 ,post_processing_size=442 ,dataset_size=1234 ,size_in_bytes=1337 + 442 + 1234 ,)
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key] ,(list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict )
    reloaded = yaml.safe_load(dataset_info_yaml )
    assert dataset_info_yaml_dict == reloaded
def __snake_case ( ):
"""simple docstring"""
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"dataset_infos_dict" ,[
DatasetInfosDict(),
DatasetInfosDict({"default": DatasetInfo()} ),
DatasetInfosDict({"my_config_name": DatasetInfo()} ),
DatasetInfosDict(
{
"default": DatasetInfo(
description="foo" ,features=Features({"a": Value("int32" )} ) ,builder_name="builder" ,config_name="config" ,version="1.0.0" ,splits=[{"name": "train"}] ,download_size=42 ,)
} ),
DatasetInfosDict(
{
"v1": DatasetInfo(dataset_size=42 ),
"v2": DatasetInfo(dataset_size=1337 ),
} ),
] ,)
def __snake_case ( tmp_path ,dataset_infos_dict : DatasetInfosDict ):
    """simple docstring"""
    tmp_path = str(tmp_path )
    dataset_infos_dict.write_to_directory(tmp_path )
    reloaded = DatasetInfosDict.from_directory(tmp_path )
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(__UpperCamelCase ,"README.md" ) )
| 312
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
'processing_mgp_str': ['MgpstrProcessor'],
'tokenization_mgp_str': ['MgpstrTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mgp_str'] = [
'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
'MgpstrModel',
'MgpstrPreTrainedModel',
'MgpstrForSceneTextRecognition',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 312
| 1
|
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
__a :Any = logging.getLogger()
def get_results ( output_dir ):
    """simple docstring"""
    results = {}
    path = os.path.join(output_dir ,"all_results.json" )
    if os.path.exists(path ):
        with open(path ,"r" ) as f:
            results = json.load(f )
    else:
        raise ValueError(f'''can\'t find {path}''' )
    return results
__a :Optional[Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class _a ( snake_case_ ):
"""simple docstring"""
def __A ( self : Dict ):
import xla_spawn
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(UpperCAmelCase , "argv" , UpperCAmelCase ):
A_ = time()
xla_spawn.main()
A_ = time()
A_ = get_results(UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 500 )
def __A ( self : Optional[int] ):
import xla_spawn
A_ = "\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split()
with patch.object(UpperCAmelCase , "argv" , UpperCAmelCase ):
xla_spawn.main()
| 312
|
import functools
from typing import Any
def __snake_case ( string : str ,words : list[str] ):
    """simple docstring"""
    if not isinstance(string ,str ) or len(string ) == 0:
        raise ValueError("the string should be not empty string" )
    if not isinstance(words ,list ) or not all(
        isinstance(item ,str ) and len(item ) > 0 for item in words ):
        raise ValueError("the words should be a list of non-empty strings" )
    # Build trie
    trie = {}
    word_keeper_key = "WORD_KEEPER"
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True
    len_string = len(string )
    # Dynamic programming method
    @functools.cache
    def is_breakable(index : int ) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index ,len_string ):
            trie_node = trie_node.get(string[i] ,None )
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key ,False ) and is_breakable(i + 1 ):
                return True
        return False
    return is_breakable(0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
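# Self-contained demo of the trie + memoised-DP idea used above; the word
# list and target string are illustrative, not part of the original module:
def _demo_word_break():
    words = ["leet", "code"]
    trie: dict = {}
    for word in words:
        node = trie
        for ch in word:
            node = node.setdefault(ch ,{} )
        node["WORD_KEEPER"] = True
    string = "leetcode"

    @functools.cache
    def breakable(index: int ) -> bool:
        if index == len(string ):
            return True
        node = trie
        for i in range(index ,len(string ) ):
            node = node.get(string[i] )
            if node is None:
                return False
            if node.get("WORD_KEEPER" ) and breakable(i + 1 ):
                return True
        return False

    assert breakable(0 )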
| 312
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__a :Optional[int] = logging.get_logger(__name__)
__a :Union[str, Any] = {
    'google/vit-base-patch16-224': 'https://huggingface.co/google/vit-base-patch16-224/resolve/main/config.json',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = 'vit'
    def __init__( self : List[str] , hidden_size : int=768 , num_hidden_layers : int=12 , num_attention_heads : int=12 , intermediate_size : int=3072 , hidden_act : str="gelu" , hidden_dropout_prob : float=0.0 , attention_probs_dropout_prob : float=0.0 , initializer_range : float=0.02 , layer_norm_eps : float=1E-12 , image_size : int=224 , patch_size : int=16 , num_channels : int=3 , qkv_bias : bool=True , encoder_stride : int=16 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
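# Quick numeric check of the patch grid implied by the defaults above
# (image_size=224, patch_size=16): the encoder sees 14 x 14 = 196 patch
# tokens, plus one [CLS] token.
assert (224 // 16) ** 2 == 196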
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : int = version.parse('1.11' )
@property
def __A ( self : Tuple ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def __A ( self : Optional[Any] ):
return 1E-4
| 312
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
__a :List[str] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__a :Union[str, Any] = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
__a :Optional[int] = {
'google/electra-small-generator': 512,
'google/electra-base-generator': 512,
'google/electra-large-generator': 512,
'google/electra-small-discriminator': 512,
'google/electra-base-discriminator': 512,
'google/electra-large-discriminator': 512,
}
__a :str = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Tuple = VOCAB_FILES_NAMES
_lowerCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : int = PRETRAINED_INIT_CONFIGURATION
_lowerCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : int = ElectraTokenizer
def __init__( self : Tuple , UpperCAmelCase : Dict=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Any=True , UpperCAmelCase : Any="[UNK]" , UpperCAmelCase : Union[str, Any]="[SEP]" , UpperCAmelCase : List[Any]="[PAD]" , UpperCAmelCase : Union[str, Any]="[CLS]" , UpperCAmelCase : List[Any]="[MASK]" , UpperCAmelCase : List[str]=True , UpperCAmelCase : Any=None , **UpperCAmelCase : Union[str, Any] , ):
super().__init__(
UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , )
A_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , UpperCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , UpperCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase ) != tokenize_chinese_chars
):
A_ = getattr(UpperCAmelCase , normalizer_state.pop("type" ) )
A_ = do_lower_case
A_ = strip_accents
A_ = tokenize_chinese_chars
A_ = normalizer_class(**UpperCAmelCase )
A_ = do_lower_case
    def __A ( self : int , token_ids_a : List[int] , token_ids_b : Optional[List[int]]=None ):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def __A ( self : Union[str, Any] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def __A ( self : Tuple , save_directory : str , filename_prefix : Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
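# Illustration of the segment-id layout produced by the pair methods above;
# the token id values below are placeholders, not real vocabulary ids:
def _demo_token_type_ids():
    cls, sep = [101], [102]
    ids_a, ids_b = [7, 8], [9]
    assert len(cls + ids_a + sep ) * [0] + len(ids_b + sep ) * [1] == [0, 0, 0, 0, 1, 1]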
| 312
| 1
|
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
__a :Dict = logging.get_logger(__name__)
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : int , *UpperCAmelCase : Tuple , **UpperCAmelCase : List[Any] ):
warnings.warn(
"The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use PoolFormerImageProcessor instead." , UpperCAmelCase , )
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
| 312
|
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
__a :Optional[Any] = logging.get_logger(__name__)
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter ( formatter_cls : type ,format_type : Optional[str] ,aliases : Optional[List[str]] = None ,):
    """simple docstring"""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f'''Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''' )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f'''Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''' )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter ( unavailable_error : Exception ,format_type : Optional[str] ,aliases : Optional[List[str]] = None ):
    """simple docstring"""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def get_format_type_from_alias ( format_type : Optional[str] ):
    """simple docstring"""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter ( format_type : Optional[str] ,**format_kwargs ):
    """simple docstring"""
    format_type = get_format_type_from_alias(format_type )
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs )
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f'''Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None )}, but got \'{format_type}\'''' )
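# Tiny illustration of the alias resolution wired up above: registered
# aliases map to their canonical format type, and unknown names pass through
# unchanged (the second string below is a made-up example):
def _demo_alias_resolution():
    assert get_format_type_from_alias("np" ) == "numpy"
    assert get_format_type_from_alias("not-a-format" ) == "not-a-format"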
| 312
| 1
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__a :Tuple = logging.get_logger(__name__)
__a :Tuple = {'tokenizer_file': 'tokenizer.json'}
__a :Dict = {
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES
_lowerCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : List[str] = ['input_ids', 'attention_mask']
_lowerCamelCase : Any = None
def __init__( self : Union[str, Any] , UpperCAmelCase : Dict=None , UpperCAmelCase : int=None , UpperCAmelCase : Dict=None , UpperCAmelCase : List[Any]="<unk>" , UpperCAmelCase : int="<s>" , UpperCAmelCase : Optional[Any]="</s>" , UpperCAmelCase : Optional[int]="<pad>" , UpperCAmelCase : Any=False , UpperCAmelCase : Optional[Any]=False , **UpperCAmelCase : List[str] , ):
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , unk_token=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , pad_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase , **UpperCAmelCase , )
A_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space:
A_ = getattr(UpperCAmelCase , pre_tok_state.pop("type" ) )
A_ = add_prefix_space
A_ = pre_tok_class(**UpperCAmelCase )
A_ = add_prefix_space
def __A ( self : List[str] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Optional[int] ):
A_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
" pretokenized inputs." )
return super()._batch_encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def __A ( self : Optional[Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Dict ):
A_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
" pretokenized inputs." )
return super()._encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def __A ( self : Tuple , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ):
A_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
def __A ( self : int , UpperCAmelCase : "Conversation" ):
A_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) + [self.eos_token_id] )
if len(UpperCAmelCase ) > self.model_max_length:
A_ = input_ids[-self.model_max_length :]
return input_ids
| 312
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_mask2former'] = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mask2former'] = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 312
| 1
|
import sys
import turtle
def get_mid ( pointa : tuple[float, float] ,pointb : tuple[float, float] ):
    """simple docstring"""
    return (pointa[0] + pointb[0]) / 2, (pointa[1] + pointb[1]) / 2
def triangle ( vertexa : tuple[float, float] ,vertexb : tuple[float, float] ,vertexc : tuple[float, float] ,depth : int ,):
    """simple docstring"""
    my_pen.up()
    my_pen.goto(vertexa[0] ,vertexa[1] )
    my_pen.down()
    my_pen.goto(vertexb[0] ,vertexb[1] )
    my_pen.goto(vertexc[0] ,vertexc[1] )
    my_pen.goto(vertexa[0] ,vertexa[1] )
    if depth == 0:
        return
    triangle(vertexa ,get_mid(vertexa ,vertexb ) ,get_mid(vertexa ,vertexc ) ,depth - 1 )
    triangle(vertexb ,get_mid(vertexa ,vertexb ) ,get_mid(vertexb ,vertexc ) ,depth - 1 )
    triangle(vertexc ,get_mid(vertexc ,vertexb ) ,get_mid(vertexa ,vertexc ) ,depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'Correct format for using this script: '
'python fractals.py <int:depth_for_fractal>'
)
my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('red')
vertices = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
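# Midpoint sanity check for get_mid above, using two of the default triangle
# vertices (pure arithmetic, no drawing involved):
assert get_mid((-175, -125) ,(175, -125) ) == (0.0, -125.0)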
| 312
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=snake_case_ )
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : str = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
_lowerCamelCase : ClassVar[Features] = Features({'audio': Audio()} )
_lowerCamelCase : ClassVar[Features] = Features({'labels': ClassLabel} )
_lowerCamelCase : str = "audio"
_lowerCamelCase : str = "labels"
    def __A ( self : str , features : List[Any] ):
        if self.label_column not in features:
            raise ValueError(f'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template
@property
def __A ( self : List[str] ):
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 312
| 1
|
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : int = CustomTokenizer
pass
| 312
|
def __snake_case ( data : bytes ):
    """simple docstring"""
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )
def __snake_case ( data : str ):
    """simple docstring"""
    if (len(data ) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set("0123456789ABCDEF" ):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] ,16 ) for i in range(0 ,len(data ) ,2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
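# Round-trip sanity check of the Base16 rules above, via the stdlib base64
# module, which implements the same RFC 3548 uppercase-hex encoding:
def _demo_base16_roundtrip():
    import base64
    data = b"Hello World!"
    encoded = base64.b16encode(data ).decode()
    assert base64.b16decode(encoded ) == data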
| 312
| 1
|
from __future__ import annotations
import math
def default_matrix_multiplication ( a : list ,b : list ):
    """simple docstring"""
    if len(a ) != 2 or len(a[0] ) != 2 or len(b ) != 2 or len(b[0] ) != 2:
        raise Exception("Matrices are not 2x2" )
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition ( matrix_a : list ,matrix_b : list ):
    """simple docstring"""
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(matrix_a ) )
    ]
def matrix_subtraction ( matrix_a : list ,matrix_b : list ):
    """simple docstring"""
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(matrix_a ) )
    ]
def split_matrix ( a : list ):
    """simple docstring"""
    if len(a ) % 2 != 0 or len(a[0] ) % 2 != 0:
        raise Exception("Odd matrices are not supported!" )
    matrix_length = len(a )
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid ,matrix_length )] for i in range(mid )]
    bot_right = [
        [a[i][j] for j in range(mid ,matrix_length )] for i in range(mid ,matrix_length )
    ]
    top_left = [[a[i][j] for j in range(mid )] for i in range(mid )]
    bot_left = [[a[i][j] for j in range(mid )] for i in range(mid ,matrix_length )]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions ( matrix : list ):
    """simple docstring"""
    return len(matrix ), len(matrix[0] )
def __snake_case ( matrix : list ):
    """simple docstring"""
    print("\n".join(str(line ) for line in matrix ) )
def actual_strassen ( matrix_a : list ,matrix_b : list ):
    """simple docstring"""
    if matrix_dimensions(matrix_a ) == (2, 2):
        return default_matrix_multiplication(matrix_a ,matrix_b )
    a , b , c , d = split_matrix(matrix_a )
    e , f , g , h = split_matrix(matrix_b )
    ta = actual_strassen(a ,matrix_subtraction(f ,h ) )
    tb = actual_strassen(matrix_addition(a ,b ) ,h )
    tc = actual_strassen(matrix_addition(c ,d ) ,e )
    td = actual_strassen(d ,matrix_subtraction(g ,e ) )
    te = actual_strassen(matrix_addition(a ,d ) ,matrix_addition(e ,h ) )
    tf = actual_strassen(matrix_subtraction(b ,d ) ,matrix_addition(g ,h ) )
    tg = actual_strassen(matrix_subtraction(a ,c ) ,matrix_addition(e ,f ) )
    top_left = matrix_addition(matrix_subtraction(matrix_addition(te ,td ) ,tb ) ,tf )
    top_right = matrix_addition(ta ,tb )
    bot_left = matrix_addition(tc ,td )
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(ta ,te ) ,tc ) ,tg )
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right ) ):
        new_matrix.append(top_left[i] + top_right[i] )
    for i in range(len(bot_right ) ):
        new_matrix.append(bot_left[i] + bot_right[i] )
    return new_matrix
def strassen ( matrixa : list ,matrixb : list ):
    """simple docstring"""
    if matrix_dimensions(matrixa )[1] != matrix_dimensions(matrixb )[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f'''Matrix A: {matrixa}\n'''
            f'''Matrix B: {matrixb}'''
        )
        raise Exception(msg )
    dimensiona = matrix_dimensions(matrixa )
    dimensionb = matrix_dimensions(matrixb )
    if dimensiona[0] == dimensiona[1] and dimensionb[0] == dimensionb[1]:
        return [matrixa, matrixb]
    maxim = max(*dimensiona ,*dimensionb )
    maxim = int(math.pow(2 ,math.ceil(math.log2(maxim ) ) ) )
    new_matrixa = matrixa
    new_matrixb = matrixb
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0 ,maxim ):
        if i < dimensiona[0]:
            for _ in range(dimensiona[1] ,maxim ):
                new_matrixa[i].append(0 )
        else:
            new_matrixa.append([0] * maxim )
        if i < dimensionb[0]:
            for _ in range(dimensionb[1] ,maxim ):
                new_matrixb[i].append(0 )
        else:
            new_matrixb.append([0] * maxim )
    final_matrix = actual_strassen(new_matrixa ,new_matrixb )
    # Removing the additional zeros
    for i in range(0 ,maxim ):
        if i < dimensiona[0]:
            for _ in range(dimensionb[1] ,maxim ):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
matrixa = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
matrixb = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixb))
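# Standalone illustration of the quadrant reassembly inside actual_strassen
# above: each result row is a top-half row (top_left + top_right) or a
# bottom-half row (bot_left + bot_right); the 1x1 blocks are illustrative.
tl, tr, bl, br = [[1]], [[2]], [[3]], [[4]]
assert [tl[0] + tr[0], bl[0] + br[0]] == [[1, 2], [3, 4]]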
| 312
|
import cv2
import numpy as np
class HarrisCorner :
"""simple docstring"""
    def __init__( self : Any , k : float , window_size : int ):
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value" )
def __str__( self : Optional[Any] ):
return str(self.k )
    def __A ( self : int , img_path : str ):
        img = cv2.imread(img_path , 0 )
        h , w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img , cv2.COLOR_GRAY2RGB )
        dy , dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0) , 0 )
                    color_img.itemset((y, x, 1) , 0 )
                    color_img.itemset((y, x, 2) , 255 )
        return color_img, corner_list
if __name__ == "__main__":
edge_detect = HarrisCorner(0.04, 3)
color_img , corner_list = edge_detect.detect('path_to_image')
cv2.imwrite('detect.png', color_img)
| 312
| 1
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
__a :Optional[int] = pytest.mark.integration
@pytest.mark.parametrize("path" ,["paws", "csv"] )
def __snake_case ( path ,tmp_path ):
    """simple docstring"""
    inspect_dataset(path ,tmp_path )
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path" ,["accuracy"] )
def __snake_case ( path ,tmp_path ):
    """simple docstring"""
    inspect_metric(path ,tmp_path )
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.parametrize(
"path, config_name, expected_splits" ,[
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] ,)
def __snake_case ( path ,config_name ,expected_splits ):
    """simple docstring"""
    info = get_dataset_config_info(path ,config_name=config_name )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" ,[
("paws", None, ValueError),
] ,)
def __snake_case ( path ,config_name ,expected_exception ):
    """simple docstring"""
    with pytest.raises(expected_exception ):
        get_dataset_config_info(path ,config_name=config_name )
@pytest.mark.parametrize(
"path, expected" ,[
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
] ,)
def __snake_case ( path ,expected ):
    """simple docstring"""
    config_names = get_dataset_config_names(path )
    assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config" ,[
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
] ,)
def __snake_case ( path ,expected_configs ,expected_splits_in_first_config ):
    """simple docstring"""
    infos = get_dataset_infos(path )
    assert list(infos.keys() ) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits" ,[
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] ,)
def __snake_case ( path ,expected_config ,expected_splits ):
    """simple docstring"""
    infos = get_dataset_infos(path )
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" ,[
("paws", None, ValueError),
] ,)
def __snake_case ( path ,config_name ,expected_exception ):
    """simple docstring"""
    with pytest.raises(expected_exception ):
        get_dataset_split_names(path ,config_name=config_name )
| 312
|
def solution ( n : int = 1000 ):
    """simple docstring"""
    return sum(2 * a * ((a - 1) // 2) for a in range(3 ,n + 1 ) )
if __name__ == "__main__":
print(solution())
| 312
| 1
|
def solution ( n : int = 100 ):
    """simple docstring"""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F"{solution() = }")
| 312
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ['image_processor', 'tokenizer']
_lowerCamelCase : Tuple = 'OwlViTImageProcessor'
_lowerCamelCase : List[Any] = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : Optional[Any] , UpperCAmelCase : int=None , UpperCAmelCase : Union[str, Any]=None , **UpperCAmelCase : Any ):
A_ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , UpperCAmelCase , )
A_ = kwargs.pop("feature_extractor" )
A_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(UpperCAmelCase , UpperCAmelCase )
def __call__( self : Optional[int] , UpperCAmelCase : List[str]=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Dict="max_length" , UpperCAmelCase : Optional[Any]="np" , **UpperCAmelCase : Optional[int] ):
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(UpperCAmelCase , UpperCAmelCase ) or (isinstance(UpperCAmelCase , UpperCAmelCase ) and not isinstance(text[0] , UpperCAmelCase )):
A_ = [self.tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )]
elif isinstance(UpperCAmelCase , UpperCAmelCase ) and isinstance(text[0] , UpperCAmelCase ):
A_ = []
# Maximum number of queries across batch
A_ = max([len(UpperCAmelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(UpperCAmelCase ) != max_num_queries:
A_ = t + [" "] * (max_num_queries - len(UpperCAmelCase ))
A_ = self.tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
encodings.append(UpperCAmelCase )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
A_ = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
A_ = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
A_ = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
A_ = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
A_ = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
A_ = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
A_ = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
A_ = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
A_ = BatchEncoding()
A_ = input_ids
A_ = attention_mask
if query_images is not None:
A_ = BatchEncoding()
A_ = self.image_processor(
UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase ).pixel_values
A_ = query_pixel_values
if images is not None:
A_ = self.image_processor(UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
if text is not None and images is not None:
A_ = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
A_ = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase ) , tensor_type=UpperCAmelCase )
def __A ( self : Optional[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : List[Any] ):
return self.image_processor.post_process(*UpperCAmelCase , **UpperCAmelCase )
def __A ( self : str , *UpperCAmelCase : str , **UpperCAmelCase : Union[str, Any] ):
return self.image_processor.post_process_object_detection(*UpperCAmelCase , **UpperCAmelCase )
def __A ( self : List[Any] , *UpperCAmelCase : int , **UpperCAmelCase : int ):
return self.image_processor.post_process_image_guided_detection(*UpperCAmelCase , **UpperCAmelCase )
def __A ( self : List[Any] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Any ):
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def __A ( self : Tuple , *UpperCAmelCase : Dict , **UpperCAmelCase : str ):
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
def __A ( self : Union[str, Any] ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCAmelCase , )
return self.image_processor_class
@property
def __A ( self : Optional[Any] ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCAmelCase , )
return self.image_processor
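# Standalone illustration of the per-batch query padding performed in
# __call__ above: shorter query lists are padded with " " up to the longest
# list in the batch (the example texts are illustrative):
def _demo_query_padding():
    texts = [["a cat"], ["a cat", "a dog"]]
    max_num_queries = max(len(t ) for t in texts )
    padded = [t + [" "] * (max_num_queries - len(t )) for t in texts]
    assert padded == [["a cat", " "], ["a cat", "a dog"]]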
| 312
| 1
|
Pointad = tuple[float, float, float]
Vectorad = tuple[float, float, float]
def create_vector ( end_pointa : Pointad ,end_pointb : Pointad ):
    """simple docstring"""
    x = end_pointb[0] - end_pointa[0]
    y = end_pointb[1] - end_pointa[1]
    z = end_pointb[2] - end_pointa[2]
    return (x, y, z)
def get_ad_vectors_cross ( ab : Vectorad ,ac : Vectorad ):
    """simple docstring"""
    x = ab[1] * ac[2] - ab[2] * ac[1] # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
    z = ab[0] * ac[1] - ab[1] * ac[0] # *k
    return (x, y, z)
def is_zero_vector ( vector : Vectorad ,accuracy : int ):
    """simple docstring"""
    return tuple(round(x ,accuracy ) for x in vector ) == (0, 0, 0)
def __snake_case ( pointa : Pointad ,pointb : Pointad ,pointc : Pointad ,accuracy : int = 10 ):
    """simple docstring"""
    ab = create_vector(pointa ,pointb )
    ac = create_vector(pointa ,pointc )
    return is_zero_vector(get_ad_vectors_cross(ab ,ac ) ,accuracy )
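# Standalone check of the identity behind the coplanarity test above: the
# cross product of two parallel vectors is the zero vector (the second vector
# below is half the first).
assert get_ad_vectors_cross((2.0, 4.0, 6.0) ,(1.0, 2.0, 3.0) ) == (0.0, 0.0, 0.0)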
| 312
|
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _a ( snake_case_ , snake_case_ ):
"""simple docstring"""
@register_to_config
def __init__( self : Dict , UpperCAmelCase : int = 768 , ):
super().__init__()
A_ = nn.Parameter(torch.zeros(1 , UpperCAmelCase ) )
A_ = nn.Parameter(torch.ones(1 , UpperCAmelCase ) )
    def __A ( self : str , device : Optional[Union[str, torch.device]] = None , dtype : Optional[torch.dtype] = None , ):
        self.mean = nn.Parameter(self.mean.to(device ).to(dtype ) )
        self.std = nn.Parameter(self.std.to(device ).to(dtype ) )
        return self
    def __A ( self : Dict , embeds : torch.Tensor ):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds
    def __A ( self : int , embeds : torch.Tensor ):
        embeds = (embeds * self.std) + self.mean
        return embeds
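# The scale/unscale pair above is an affine round trip; a minimal numeric
# check of the same identity, with scalar stand-ins for mean and std:
def _demo_scale_roundtrip():
    mean, std, x = 0.5, 2.0, 3.0
    scaled = (x - mean) * 1.0 / std
    assert scaled * std + mean == x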
| 312
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__a :Union[str, Any] = logging.get_logger(__name__)
__a :str = {
'andreasmadsen/efficient_mlm_m0.40': (
'https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'
),
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[Any] = 'roberta-prelayernorm'
    def __init__( self : List[Any] , vocab_size : int=50265 , hidden_size : int=768 , num_hidden_layers : int=12 , num_attention_heads : int=12 , intermediate_size : int=3072 , hidden_act : str="gelu" , hidden_dropout_prob : float=0.1 , attention_probs_dropout_prob : float=0.1 , max_position_embeddings : int=512 , type_vocab_size : int=2 , initializer_range : float=0.02 , layer_norm_eps : float=1E-12 , pad_token_id : int=1 , bos_token_id : int=0 , eos_token_id : int=2 , position_embedding_type : str="absolute" , use_cache : bool=True , classifier_dropout : Optional[float]=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class _a ( snake_case_ ):
"""simple docstring"""
@property
def __A ( self : str ):
if self.task == "multiple-choice":
A_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
A_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 312
|
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def __snake_case ( coefficient_matrix : NDArray[float64] ,constant_matrix : NDArray[float64] ,init_val : list[int] ,iterations : int ,):
    """simple docstring"""
    rowsa , colsa = coefficient_matrix.shape
    rowsb , colsb = constant_matrix.shape
    if rowsa != colsa:
        msg = f'''Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'''
        raise ValueError(msg )
    if colsb != 1:
        msg = f'''Constant matrix must be nx1 but received {rowsb}x{colsb}'''
        raise ValueError(msg )
    if rowsa != rowsb:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f'''received {rowsa}x{colsa} and {rowsb}x{colsb}'''
        )
        raise ValueError(msg )
    if len(init_val ) != rowsa:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f'''matrix but received {len(init_val )} and {rowsa}'''
        )
        raise ValueError(msg )
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1" )
    table = np.concatenate(
        (coefficient_matrix, constant_matrix) ,axis=1 )
    rows , cols = table.shape
    strictly_diagonally_dominant(table )
    # Iterates the whole matrix for given number of times
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_temp = (temp + val) / denom
            new_val.append(new_temp )
        init_val = new_val
    return [float(i ) for i in new_val]
def strictly_diagonally_dominant ( table : NDArray[float64] ):
    """simple docstring"""
    rows , cols = table.shape
    is_diagonally_dominant = True
    for i in range(0 ,rows ):
        total = 0
        for j in range(0 ,cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant" )
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
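# One hand-computed Jacobi sweep for the system 4x + y = 9, x + 3y = 7,
# starting from (0, 0); it matches the (temp + val) / denom row update above
# (the system itself is an illustrative example, not from the module):
def _demo_jacobi_sweep():
    x0, y0 = 0.0, 0.0
    x1 = (9 - 1 * y0) / 4
    y1 = (7 - 1 * x0) / 3
    assert (x1, y1) == (2.25, 7 / 3)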
| 312
| 1
|
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset ( ):
    """simple docstring"""
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict )
    return dataset
class _a ( snake_case_ ):
"""simple docstring"""
def __A ( self : Union[str, Any] ):
A_ = get_dataset()
A_ = make_duplicate_clusters(UpperCAmelCase , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def __A ( self : List[Any] ):
A_ = get_dataset()
A_ , A_ = deduplicate_dataset(UpperCAmelCase )
self.assertEqual(len(UpperCAmelCase ) , 2 )
print(UpperCAmelCase )
self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 )
self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , UpperCAmelCase )
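

if __name__ == "__main__":
    # Usage sketch (illustrative addition, not part of the original tests):
    # run the helpers directly on the toy dataset to inspect the clusters; this
    # assumes `minhash_deduplication` is importable from the working directory.
    ds = get_dataset()
    print(make_duplicate_clusters(ds, 0.85))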
| 312
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"


class FNetTokenizerFast(PreTrainedTokenizerFast):
    """Fast FNet tokenizer backed by a SentencePiece model plus a `tokenizers` JSON file."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs by adding [CLS] and [SEP] around one or two sequences."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create token type ids: 0 for the first sequence (plus specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the SentencePiece vocabulary file into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
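

# Usage sketch (illustrative addition, not part of the original module): load the
# fast tokenizer from the Hub and encode a sentence pair. "google/fnet-base" is one
# of the checkpoints referenced above; running this requires network access.
#
#     tokenizer = FNetTokenizerFast.from_pretrained("google/fnet-base")
#     encoded = tokenizer("Hello world", "How are you?")
#     print(encoded["input_ids"], encoded["token_type_ids"])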
| 312
|
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
| 312
| 1
|
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Return the relative step count (0..1) after which the Mandelbrot sequence
    for c = x + y*i diverges, or 1.0 if it stays bounded for max_step steps."""
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """Black-and-white coloring: black inside the set, white outside."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Hue-based coloring: black inside the set, a rainbow gradient outside."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """Render the Mandelbrot set to a PIL image."""
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 312
|
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
    )
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 312
| 1
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class DebertaV2Config(PretrainedConfig):
    """Configuration class to store the configuration of a DeBERTa-v2 model."""

    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility
        if isinstance(pos_att_type, str):
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
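

# Usage sketch (illustrative addition, not part of the original module): build a
# small config and inspect the ONNX input axes; the keyword values are arbitrary,
# chosen only to keep the example tiny.
#
#     config = DebertaV2Config(hidden_size=128, num_hidden_layers=2, num_attention_heads=2)
#     onnx_config = DebertaV2OnnxConfig(config)
#     print(onnx_config.inputs)  # OrderedDict of input names -> dynamic axes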
| 312
|
def solution(n: int = 1000) -> int:
    """Return the sum of all natural numbers below `n` that are multiples of 3 or 5.

    >>> solution(10)
    23
    """
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(F"{solution() = }")
| 312
| 1