'''simple docstring'''
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # DistilBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
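# Hedged usage note (the path below is an assumption about the repo layout): run this
# module with pytest from the repository root, optionally filtering to one test, e.g.
#   python -m pytest tests/models/distilbert/test_modeling_distilbert.py -k test_distilbert_model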
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
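# A minimal usage sketch (not part of the original file; the OWL-ViT checkpoint name is an
# assumption — any checkpoint registered for this task should work the same way):
#
#   from transformers import pipeline
#
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   predictions = detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote control"],
#   )
#   # -> list of {"score": float, "label": str, "box": {"xmin", "ymin", "xmax", "ymax"}}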
"""simple docstring"""
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
_lowercase = '.'
if __name__ == "__main__":
_lowercase = os.path.join(REPO_PATH, '''utils/documentation_tests.txt''')
_lowercase = []
_lowercase = []
with open(doctest_file_path) as fp:
for line in fp:
_lowercase = line.strip()
_lowercase = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
_lowercase = '\n'.join(non_existent_paths)
raise ValueError(F"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
if all_paths != sorted(all_paths):
raise ValueError('''Files in `utils/documentation_tests.txt` are not in alphabetical order.''')
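# Hedged fix-up tip (assumes a POSIX `sort`): Python's sorted() compares code points, so
# pin the locale when sorting the list file in place to keep the two checks in agreement:
#   LC_ALL=C sort utils/documentation_tests.txt -o utils/documentation_tests.txt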
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./  # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
    "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
    "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
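# Hedged invocation sketch (script and checkpoint file names are assumptions):
#   python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base-converted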
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}


class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
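# A minimal usage sketch (sizes are illustrative, not the pretrained defaults):
#   from transformers import DebertaV2Config, DebertaV2Model
#
#   config = DebertaV2Config(
#       vocab_size=1000, hidden_size=128, num_hidden_layers=2,
#       num_attention_heads=4, intermediate_size=256,
#   )
#   model = DebertaV2Model(config)  # randomly initialized, for smoke-testing only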
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: each pass floats the largest remaining element to the end.

    >>> bubble_sort([0, 5, 2, 3, 2])
    [0, 2, 2, 3, 5]
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
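# Hedged complexity note: worst case is O(n^2) comparisons across the recursive passes;
# the `swapped` flag gives an O(n) best case on already-sorted input, e.g.
#   bubble_sort([3, 1, 2])  # -> [1, 2, 3]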
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class A_(metaclass=snake_case_ ):
"""simple docstring"""
a_ : List[Any] = ['torch', 'torchsde']
def __init__( self , *A , **A ):
requires_backends(self , ['torch', 'torchsde'] )
@classmethod
def _lowerCAmelCase ( cls , *A , **A ):
requires_backends(cls , ['torch', 'torchsde'] )
@classmethod
def _lowerCAmelCase ( cls , *A , **A ):
requires_backends(cls , ['torch', 'torchsde'] )
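# Hedged behaviour note: with `torchsde` missing, the dummy defers failure from import
# time to use time — e.g. DPMSolverSDEScheduler() raises an ImportError telling the user
# which backends to install.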
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
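# Hedged follow-up: pickling matters because checkpointing utilities serialize the wrapped
# optimizer; in practice one would call `accelerator.save_state(output_dir)` (sketch,
# assuming a writable `output_dir`) rather than pickling by hand.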
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}


class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self):
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-large-lv60",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/s2t-small-mustc-en-fr-st",
        type=str,
        help="Path to hf decoder s2t checkpoint config",
    )
    parser.add_argument("--vocab_size", default=10224, type=int, help="Vocab size of decoder")
    parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
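# Hedged invocation sketch (script and checkpoint file names are assumptions):
#   python convert_wav2vec2_seq2seq_original_to_pytorch.py \
#       --checkpoint_path ./checkpoint_best.pt --dict_path ./dict.txt \
#       --pytorch_dump_folder_path ./wav2vec2-s2t2-converted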
from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of exactly `successes` successes in `trials` Bernoulli trials."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('Probability of 2 successes out of 4 trails')
print('with probability of 0.75 is:', end=' ')
print(binomial_distribution(2, 4, 0.75))
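# Hedged cross-check (assumes scipy is installed): scipy.stats.binom.pmf(2, 4, 0.75)
# should agree with binomial_distribution(2, 4, 0.75) == 0.2109375.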
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
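# A minimal usage sketch (the DETR checkpoint name is an assumption; any object-detection
# checkpoint registered for this task should work):
#   from transformers import pipeline
#
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)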
'''simple docstring'''
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class UpperCAmelCase ( snake_case_ ):
'''simple docstring'''
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'num_attention_heads' ) )
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=[128, 256, 384] , SCREAMING_SNAKE_CASE_=[4, 6, 8] , SCREAMING_SNAKE_CASE_=[2, 3, 4] , SCREAMING_SNAKE_CASE_=[16, 16, 16] , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=[2, 2, 2] , SCREAMING_SNAKE_CASE_=[2, 2, 2] , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=2 , ) -> int:
'''simple docstring'''
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = kernel_size
lowerCamelCase_ = stride
lowerCamelCase_ = padding
lowerCamelCase_ = hidden_sizes
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = depths
lowerCamelCase_ = key_dim
lowerCamelCase_ = drop_path_rate
lowerCamelCase_ = patch_size
lowerCamelCase_ = attention_ratio
lowerCamelCase_ = mlp_ratio
lowerCamelCase_ = initializer_range
lowerCamelCase_ = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
lowerCamelCase_ = is_training
lowerCamelCase_ = use_labels
lowerCamelCase_ = num_labels
lowerCamelCase_ = initializer_range
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = LevitModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = (self.image_size, self.image_size)
lowerCamelCase_ ,lowerCamelCase_ = image_size[0], image_size[1]
for _ in range(4 ):
lowerCamelCase_ = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
lowerCamelCase_ = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = LevitForImageClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ = (
{
'feature-extraction': LevitModel,
'image-classification': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = LevitModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase( self ) -> str:
'''simple docstring'''
return
@unittest.skip(reason='Levit does not use inputs_embeds' )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason='Levit does not support input and output embeddings' )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason='Levit does not output attentions' )
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
pass
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
def check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = outputs.hidden_states
lowerCamelCase_ = len(self.model_tester.depths ) + 1
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = (self.model_tester.image_size, self.model_tester.image_size)
lowerCamelCase_ ,lowerCamelCase_ = image_size[0], image_size[1]
for _ in range(4 ):
lowerCamelCase_ = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
lowerCamelCase_ = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass
    def _prepare_for_class(self , inputs_dict , model_class , return_labels=False ) -> Tuple:
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
if not self.model_tester.is_training:
return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING )
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config )
            model.gradient_checkpointing_enable()
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(SCREAMING_SNAKE_CASE_ ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f'''Testing {model_class} with {problem_type["title"]}''' ):
                    config.problem_type = problem_type['title']
                    config.num_labels = problem_type['num_labels']
                    model = model_class(config )
                    model.to(torch_device )
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
                    if problem_type["num_labels"] > 1:
                        inputs['labels'] = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] )
                    inputs['labels'] = inputs['labels'].to(problem_type['dtype'] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True ) as warning_list:
                        loss = model(**inputs ).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message ):
                            raise ValueError(
                                f'''Something is going wrong in the regression problem: intercepted {w.message}''' )
                    loss.backward()
@slow
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor(self ) -> Tuple:
        '''simple docstring'''
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
    def test_inference_image_classification_head(self ) -> Any:
        '''simple docstring'''
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([1.0_448, -0.3_745, -1.8_317] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 42 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image ):
    """Resize to an integer multiple of 32 and rescale pixel values to [-1, 1]."""
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
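

# Hedged usage sketch (not part of the original pipeline file): `preprocess` returns a
# 1 x 3 x H x W float tensor scaled to [-1, 1], with H and W rounded down to multiples
# of 32; the demo image size below is made up for illustration.
if __name__ == "__main__":
    from PIL import Image as _PILImage

    _demo = preprocess(_PILImage.new("RGB", (130, 70) ) )
    assert tuple(_demo.shape ) == (1, 3, 64, 128)  # 70 -> 64, 130 -> 128
    assert -1.0 <= float(_demo.min() ) <= float(_demo.max() ) <= 1.0
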
class LDMSuperResolutionPipeline( DiffusionPipeline ):
"""simple docstring"""
    def __init__( self , vqvae: VQModel , unet: UNet2DModel , scheduler: Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ):
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , image: Union[torch.Tensor, PIL.Image.Image] = None , batch_size: Optional[int] = 1 , num_inference_steps: Optional[int] = 100 , eta: Optional[float] = 0.0 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ):
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        else:
            raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image )}''' )
        if isinstance(image , PIL.Image.Image ):
            image = preprocess(image )
        height, width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters() ).dtype
        latents = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        image = image.to(device=self.device , dtype=latents_dtype )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor ):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image] , dim=1 )
            latents_input = self.scheduler.scale_model_input(latents_input , t )
            # predict the noise residual
            noise_pred = self.unet(latents_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_kwargs ).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents ).sample
        image = torch.clamp(image , -1.0 , 1.0 )
        image = image / 2 + 0.5
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image ) | 86 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , apply_ocr=True , ) -> Tuple:
        size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
def UpperCamelCase ( self ) -> int:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
_UpperCAmelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def UpperCamelCase ( self ) -> str:
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )
@property
def UpperCamelCase ( self ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self ) -> List[Any]:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
        self.assertTrue(hasattr(image_processing , '''apply_ocr''' ) )
def UpperCamelCase ( self ) -> str:
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def UpperCamelCase ( self ) -> Any:
pass
def UpperCamelCase ( self ) -> Optional[int]:
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , Image.Image )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
self.assertIsInstance(encoding.words , A__ )
self.assertIsInstance(encoding.boxes , A__ )
# Test batched
snake_case = image_processing(A__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def UpperCamelCase ( self ) -> Union[str, Any]:
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , numpify=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , np.ndarray )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
snake_case = image_processing(A__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def UpperCamelCase ( self ) -> Union[str, Any]:
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , torchify=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , torch.Tensor )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
snake_case = image_processing(A__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def UpperCamelCase ( self ) -> Optional[Any]:
# with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        ds = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
        image = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
        encoding = image_processing(image , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
snake_case = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
snake_case = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 
6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , A__ )
self.assertListEqual(encoding.boxes , A__ )
# with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False )
        encoding = image_processing(image , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
| 342 |
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_0000)]


def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of ``number``."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
        number //= 10_0000

    return sum_of_digits_squared


# There are 2 chains: one ends with 89, with the chain member 58 being the one
# which, when declared first, gives the least number of iterations for all the
# members to be checked; the other ends with 1 and has only one element, 1.
# So 58 and 1 are chosen to be declared at the start.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 1000_0000
CHAINS[0] = True  # 1 ends the chain that terminates in 1
CHAINS[57] = False  # 58 belongs to the chain that terminates in 89


def chain(number: int) -> bool:
    """Return True if ``number``'s chain ends in 1, False if it ends in 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain

    # every multiple of 10 has the same digit-square sum, so cache those results too
    while number < 1000_0000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 1000_0000 ) -> int:
    """Count how many chains starting below ``number`` end in 89."""
    for i in range(1 , number ):
        if CHAINS[i] is None:
            chain(i + 1 )

    return CHAINS[:number].count(False )
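

# Worked example (added for illustration): the squared-digit chain starting at 44 runs
# 44 -> 32 -> 13 -> 10 -> 1, so 44 belongs to the chain that terminates in 1.
_EXAMPLE_CHAIN = [44]
while _EXAMPLE_CHAIN[-1] not in (1, 89):
    _EXAMPLE_CHAIN.append(next_number(_EXAMPLE_CHAIN[-1] ) )
assert _EXAMPLE_CHAIN == [44, 32, 13, 10, 1]
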
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{solution() = }") | 86 | 0 |
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
ZERO2 = 'zero2'
ZERO3 = 'zero3'
stages = [ZERO2, ZERO3]
def custom_name_func(func, param_num, param) -> str:
    '''simple docstring'''
    param_based_name = parameterized.to_safe_name('_'.join(str(x) for x in param.args))
    return F"{func.__name__}_{param_based_name}"
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
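# For illustration, with the dicts above the product is
# [("zero2", "base"), ("zero2", "robust"), ("zero3", "base"), ("zero3", "robust")],
# and custom_name_func appends a suffix such as "zero2_base" to each generated test name.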
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2( TestCasePlus ):
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp32_non_distributed( self , stage , model ):
        self.run_and_check(
            stage=stage , model=model , distributed=False , fp16=False , )
    @require_torch_multi_gpu
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp32_distributed( self , stage , model ):
        self.run_and_check(
            stage=stage , model=model , distributed=True , fp16=False , )
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp16_non_distributed( self , stage , model ):
        self.run_and_check(
            stage=stage , model=model , distributed=False , fp16=True , )
    @require_torch_multi_gpu
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp16_distributed( self , stage , model ):
        self.run_and_check(
            stage=stage , model=model , distributed=True , fp16=True , )
    def do_checks( self , output_dir ):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass
    def run_and_check( self , stage , model , eval_steps = 10 , distributed = True , quality_checks = True , fp16 = True , ):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage , model_name=model_name , eval_steps=eval_steps , num_train_epochs=1 , distributed=distributed , fp16=fp16 , )
        self.do_checks(output_dir )
        return output_dir
    def run_trainer( self , stage , model_name , eval_steps = 10 , num_train_epochs = 1 , distributed = True , fp16 = True , ):
        output_dir = self.get_auto_remove_tmp_dir("""./xxx""" , after=False )
        args = f"\n            --model_name_or_path {model_name}\n            --dataset_name hf-internal-testing/librispeech_asr_dummy\n            --dataset_config_name clean\n            --train_split_name validation\n            --validation_split_name validation\n            --output_dir {output_dir}\n            --num_train_epochs {str(num_train_epochs )}\n            --per_device_train_batch_size 2\n            --per_device_eval_batch_size 2\n            --evaluation_strategy steps\n            --learning_rate 5e-4\n            --warmup_steps 8\n            --orthography timit\n            --preprocessing_num_workers 1\n            --group_by_length\n            --freeze_feature_extractor\n            --report_to none\n            --save_steps 0\n            --eval_steps {eval_steps}\n            --report_to none\n        ".split()
        if fp16:
            args.extend(["""--fp16"""] )
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed )
        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd , env=self.get_env() )
        return output_dir
    def get_launcher( self , distributed=False ):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with more gpus because we use very little data)
        num_gpus = min(2 , get_gpu_count() ) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
| 79 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__a :List[Any] = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Any = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Dict = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
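# Illustrative note (not from the original file): with this lazy pattern,
# `from transformers.models.tapas import TapasModel` does not trigger the torch-gated
# import at module-import time; the _LazyModule registered at the bottom resolves
# names from _import_structure on first attribute access.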
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 86 | 0 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./  # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
A : Any = logging.get_logger(__name__)
A : int = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
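# For illustration: a fairseq key such as "encoder.layers.3.self_attn.k_proj.weight"
# matches the "self_attn.k_proj" entry above; the "*" in the mapped key is later
# replaced by the layer index, yielding "encoder.layers.3.attention.k_proj" with
# weight_type "weight".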
A : Tuple = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights(fairseq_model , hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path=None ):
    # load the pre-trained checkpoint and rebuild the original model
    checkpoint = torch.load(checkpoint_path )
    cfg = WavLMConfigOrig(checkpoint["cfg"] )
    model = WavLMOrig(cfg )
    model.load_state_dict(checkpoint["model"] )
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path )
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config )
    recursively_load_weights(model , hf_wavlm )
    hf_wavlm.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
A : List[Any] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
A : Optional[int] = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 140 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
__a :List[Any] = get_logger()
__a :Optional[dict] = None
class _a ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
"""simple docstring"""
    def __init__( self , features=None , device=None , **jnp_array_kwargs ):
        super().__init__(features=features )
        import jax
        from jaxlib.xla_client import Device
        if isinstance(device , Device ):
            raise ValueError(
                f'''Expected {device} to be a `str` not {type(device )}, as `jaxlib.xla_extension.Device` '''
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`." )
        self.device = device if isinstance(device , str ) else str(jax.devices()[0] )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys() ):
            logger.warning(
                f'''Device with string identifier {self.device} not listed among the available '''
                f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
                f'''device: {str(jax.devices()[0] )}.''' )
            self.device = str(jax.devices()[0] )
        self.jnp_array_kwargs = jnp_array_kwargs
@staticmethod
    def _map_devices_to_str( ):
        import jax
        return {str(device ): device for device in jax.devices()}
    def _consolidate( self , column ):
        import jax
        import jax.numpy as jnp
        if isinstance(column , list ) and column:
            if all(
                isinstance(x , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
                return jnp.stack(column , axis=0 )
        return column
    def _tensorize( self , value ):
        import jax
        import jax.numpy as jnp
        if isinstance(value , (str, bytes, type(None )) ):
            return value
        elif isinstance(value , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value , PIL.Image.Image ):
                value = np.asarray(value )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device] ):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value , **{**default_dtype, **self.jnp_array_kwargs} )
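    # Example of the default-dtype rule above (illustrative): with x64 disabled, which
    # is the JAX default, integer columns are materialized as jnp.int32 and floating
    # point columns as jnp.float32; enabling jax.config.jax_enable_x64 keeps 64-bit
    # integers instead.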
    def _recursive_tensorize( self , data_struct ):
        import jax
        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch
            if isinstance(data_struct , torch.Tensor ):
                return self._tensorize(data_struct.detach().cpu().numpy()[()] )
        if hasattr(data_struct , "__array__" ) and not isinstance(data_struct , jax.Array ):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct , np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(data_struct )
    def recursive_tensorize( self , data_struct ):
        return map_nested(self._recursive_tensorize , data_struct , map_list=False )
    def format_row( self , pa_table ):
        row = self.numpy_arrow_extractor().extract_row(pa_table )
        row = self.python_features_decoder.decode_row(row )
        return self.recursive_tensorize(row )
    def format_column( self , pa_table ):
        column = self.numpy_arrow_extractor().extract_column(pa_table )
        column = self.python_features_decoder.decode_column(column , pa_table.column_names[0] )
        column = self.recursive_tensorize(column )
        column = self._consolidate(column )
        return column
    def format_batch( self , pa_table ):
        batch = self.numpy_arrow_extractor().extract_batch(pa_table )
        batch = self.python_features_decoder.decode_batch(batch )
        batch = self.recursive_tensorize(batch )
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name] )
        return batch | 86 | 0 |
def neville_interpolate(x_points , y_points , x0 ):
    """
    Interpolate and evaluate the polynomial through (x_points, y_points) at x0
    using Neville's iterated-interpolation scheme.
    """
    n = len(x_points )
    q = [[0] * n for i in range(n )]
    for i in range(n ):
        q[i][1] = y_points[i]
    for i in range(2 , n ):
        for j in range(i , n ):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
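

# Worked example (added for illustration): the points below lie on the line y = x + 5,
# so the scheme above reproduces the line exactly and evaluates it at x0 = 5.
assert neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0] == 10.0
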
if __name__ == "__main__":
import doctest
doctest.testmod()
| 99 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
__a :Any = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever( RagRetriever ):
"""simple docstring"""
    def __init__( self , config , question_encoder_tokenizer , generator_tokenizer , index=None ):
        super().__init__(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
        self.process_group = None
    def init_retrieval( self , distributed_port ):
logger.info("initializing retrieval" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("dist initialized" )
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1 )
            self.process_group = dist.new_group(ranks=None , backend="gloo" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("dist not initialized / main" )
self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
    def _is_main( self ):
        return dist.get_rank(group=self.process_group ) == 0
    def _scattered( self , scatter_list , target_shape , target_type=torch.float32 ):
        target_tensor = torch.empty(target_shape , dtype=target_type )
        dist.scatter(target_tensor , src=0 , scatter_list=scatter_list , group=self.process_group )
        return target_tensor
    def _infer_socket_ifname( self ):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e" )) , None )
        return ifname
    def retrieve( self , question_hidden_states , n_docs ):
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states , n_docs )
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids )
        # distributed training
        world_size = dist.get_world_size(group=self.process_group )
        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape , dtype=torch.float32 ) for _ in range(world_size )]
        dist.gather(torch.tensor(question_hidden_states ) , dst=0 , gather_list=gather_list , group=self.process_group )
        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list ) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list ).numpy() , n_docs )
            ids, vectors = torch.tensor(ids ), torch.tensor(vectors )
            scatter_ids = self._chunk_tensor(ids , n_queries )
            scatter_vectors = self._chunk_tensor(vectors , n_queries )
        doc_ids = self._scattered(scatter_ids , [n_queries, n_docs] , target_type=torch.int64 )
        retrieved_doc_embeds = self._scattered(scatter_vectors , [n_queries, n_docs, question_hidden_states.shape[1]] )
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids ) | 86 | 0 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
a : List[str] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotObjectDetectionPipeline( ChunkPipeline ):
    def __init__( self , **kwargs ):
        """simple docstring"""
        super().__init__(**kwargs )
        if self.framework == "tf":
            raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
        requires_backends(self , "vision" )
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING )
    def __call__( self , image: Union[str, "Image.Image", List[Dict[str, Any]]] , candidate_labels: Union[str, List[str]] = None , **kwargs , ):
        """simple docstring"""
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries" )
        if isinstance(image , (str, Image.Image) ):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs , **kwargs )
return results
    def _sanitize_parameters( self , **kwargs ):
        """simple docstring"""
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params
    def preprocess( self , inputs ):
        """simple docstring"""
        image = load_image(inputs["image"] )
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels , str ):
            candidate_labels = candidate_labels.split("," )
        target_size = torch.tensor([[image.height, image.width]] , dtype=torch.int32 )
        for i, candidate_label in enumerate(candidate_labels ):
            text_inputs = self.tokenizer(candidate_label , return_tensors=self.framework )
            image_features = self.image_processor(image , return_tensors=self.framework )
            yield {
                "is_last": i == len(candidate_labels ) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def _forward( self , model_inputs ):
        """simple docstring"""
        target_size = model_inputs.pop("target_size" )
        candidate_label = model_inputs.pop("candidate_label" )
        is_last = model_inputs.pop("is_last" )
        outputs = self.model(**model_inputs )
        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs
    def postprocess( self , model_outputs , threshold=0.1 , top_k=None ):
        """simple docstring"""
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output )
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output , threshold=threshold , target_sizes=model_output["target_size"] )[0]
            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0] )
                result = {"score": score, "label": label, "box": box}
                results.append(result )
        results = sorted(results , key=lambda x: x["score"] , reverse=True )
        if top_k:
            results = results[:top_k]
        return results
    def _get_bounding_box( self , box: "torch.Tensor" ):
        """simple docstring"""
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch." )
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
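# Hedged usage sketch (illustrative; the checkpoint id is an assumption, not taken
# from this file) showing how the pipeline above is usually reached via the factory:
#
#   from transformers import pipeline
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   detector("street.png", candidate_labels=["car", "bicycle"])
#   # -> [{"score": ..., "label": "car", "box": {"xmin": ..., "ymin": ..., ...}}, ...]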
| 69 |
from jiwer import compute_measures
import datasets
__a :List[Any] = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
__a :Union[str, Any] = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
__a :str = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
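# Hedged sanity check (added for illustration): reproduces the 0.5 WER from the
# docstring example above by accumulating jiwer's per-pair counts the same way
# `_compute` below does.
if __name__ == "__main__":
    _pairs = [
        ("this is the reference", "this is the prediction"),
        ("there is another one", "there is an other sample"),
    ]
    _measures = [compute_measures(ref , hyp ) for ref, hyp in _pairs]
    _errors = sum(m["substitutions"] + m["deletions"] + m["insertions"] for m in _measures )
    _total = sum(m["substitutions"] + m["deletions"] + m["hits"] for m in _measures )
    assert _errors / _total == 0.5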
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] , )
    def _compute( self , predictions=None , references=None , concatenate_texts=False ):
        if concatenate_texts:
            return compute_measures(references , predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references ):
                measures = compute_measures(reference , prediction )
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total | 86 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_lowercase = logging.get_logger(__name__)
class __a ( BaseImageProcessor ):
'''simple docstring'''
    model_input_names = ['pixel_values']
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BICUBIC , do_center_crop = True , do_rescale = True , rescale_factor = 1 / 255 , crop_size = None , do_normalize = True , image_mean = None , image_std = None , **kwargs , ) -> List[Any]:
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name="crop_size" )
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize( self , image , size , resample = PILImageResampling.BILINEAR , data_format = None , **kwargs , ) -> Optional[Any]:
        '''simple docstring'''
        size = get_size_dict(size )
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image , size=size["shortest_edge"] , default_to_square=False )
        # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' )
        return resize(image , size=size , resample=resample , data_format=data_format , **kwargs )
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ) -> Dict:
'''simple docstring'''
__lowercase = get_size_dict(_lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(_lowerCamelCase , size=(size["height"], size["width"]) , data_format=_lowerCamelCase , **_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase ) -> Dict:
'''simple docstring'''
return rescale(_lowerCamelCase , scale=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ) -> List[Any]:
'''simple docstring'''
return normalize(_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 118 |
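The preprocessing order above is resize, center-crop, rescale, normalize, each applied per image on numpy arrays. A small sketch of the last two steps with the usual ImageNet statistics (the same values that `IMAGENET_DEFAULT_MEAN`/`IMAGENET_DEFAULT_STD` hold; the image is random):

```python
import numpy as np

IMAGENET_DEFAULT_MEAN = [0.485, 0.456, 0.406]
IMAGENET_DEFAULT_STD = [0.229, 0.224, 0.225]

# a fake 224x224 RGB image in channels-last layout
image = np.random.randint(0, 256, (224, 224, 3)).astype(np.float32)

rescaled = image * (1 / 255)  # the do_rescale step
mean = np.array(IMAGENET_DEFAULT_MEAN, dtype=np.float32)
std = np.array(IMAGENET_DEFAULT_STD, dtype=np.float32)
normalized = (rescaled - mean) / std  # the do_normalize step, per channel

print(normalized.shape)  # (224, 224, 3); to_channel_dimension_format then moves channels first
```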
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None

    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # subclasses override this with the actual algorithm
    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1


if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()

    print(f"maximum flow is {maximum_flow}") | 86 | 0 |
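A quick cross-check of the push-relabel result: an independent Edmonds-Karp sketch on the same 4-vertex graph should also report 6, the bottleneck capacity along the only augmenting path 0 -> 1 -> 2 -> 3.

```python
from collections import deque


def edmonds_karp(capacity, source, sink):
    n = len(capacity)
    flow = [[0] * n for _ in range(n)]
    max_flow = 0
    while True:
        # BFS for an augmenting path in the residual graph
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue and parent[sink] == -1:
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and capacity[u][v] - flow[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:
            return max_flow
        # find the bottleneck along the path, then augment
        bottleneck = float("inf")
        v = sink
        while v != source:
            u = parent[v]
            bottleneck = min(bottleneck, capacity[u][v] - flow[u][v])
            v = u
        v = sink
        while v != source:
            u = parent[v]
            flow[u][v] += bottleneck
            flow[v][u] -= bottleneck
            v = u
        max_flow += bottleneck


graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
print(edmonds_karp(graph, 0, 3))  # 6, limited by the capacity-6 edge 1 -> 2
```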
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 500 |
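The reason `gather_for_metrics` exists: a plain `gather` pads the dataset to a multiple of the process count, so the final batch comes back with duplicated samples. The padding arithmetic, sketched with the numbers used above:

```python
# 82 samples across 8 processes: each process is handed ceil(82 / 8) = 11,
# so a plain gather returns 88 predictions, 6 of them duplicates.
num_samples, num_processes = 82, 8
per_process = -(-num_samples // num_processes)  # ceiling division
gathered = per_process * num_processes
print(gathered)     # 88 from a plain gather
print(num_samples)  # 82 after gather_for_metrics trims the duplicates
```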
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xglm'] = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xglm_fast'] = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xglm'] = [
        'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'XGLMForCausalLM',
        'XGLMModel',
        'XGLMPreTrainedModel',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_xglm'] = [
        'FlaxXGLMForCausalLM',
        'FlaxXGLMModel',
        'FlaxXGLMPreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xglm'] = [
        'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFXGLMForCausalLM',
        'TFXGLMModel',
        'TFXGLMPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure) | 86 | 0 |
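A toy sketch of the lazy-module mechanism (not the transformers implementation): the import structure maps submodules to exported names, and attribute access triggers the real import on first use. The relative import assumes the object replaces a real package module, as `sys.modules[__name__] = ...` does above.

```python
import importlib
import types


class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [names]} into {name: submodule}
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__} has no attribute {attr}")
        submodule = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value
```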
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
a_ = '\\n\n'
a_ = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
a_ = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'input_texts': datasets.Value('string' ),
} ) , reference_urls=['https://huggingface.co/docs/transformers/perplexity'] , )
    def _compute(self, input_texts, model_id, batch_size=16, add_start_token=True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
            if device == "gpu":
                device = 'cuda'
        else:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
# check that the model already has at least one special token defined
assert (
len(A ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors='pt',
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings['input_ids']
        attn_masks = encodings['attention_mask']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        ppls = []
        loss_fct = CrossEntropyLoss(reduction='none')

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # CrossEntropyLoss is in nats, so perplexity is torch.exp of the mean NLL
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 437 |
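The formula being computed: perplexity is the exponential of the mean per-token negative log-likelihood, and `CrossEntropyLoss` returns nats, hence `torch.exp`. A toy worked example with a padding mask:

```python
import torch

# pretend per-token losses (nats) for one 4-token sequence, with a mask
token_nll = torch.tensor([2.0, 3.0, 1.0, 4.0])
mask = torch.tensor([1.0, 1.0, 1.0, 0.0])  # last position is padding
ppl = torch.exp((token_nll * mask).sum() / mask.sum())
print(round(ppl.item(), 2))  # exp(2.0) = 7.39
```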
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
    pair = f"{src_lang}-{tgt_lang}"
    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / 'model_cards'

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split('-')
    model_card_dir = model_cards_dir / 'facebook' / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang) | 86 | 0 |
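One template detail worth calling out: the BibTeX block above doubles its braces because `{{` and `}}` are the f-string escapes for literal braces. A tiny demonstration:

```python
src_lang, tgt_lang = "en", "ru"
snippet = f"@inproceedings{{wmt19, pair={{{src_lang}-{tgt_lang}}}}}"
print(snippet)  # @inproceedings{wmt19, pair={en-ru}}
```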
a_ : Union[str, Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
a_ : List[str] = [{'type': 'code', 'content': INSTALL_CONTENT}]
a_ : Dict = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 194 |
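A sketch of how such a placeholder map is typically applied to the docs source before the notebook is generated (the loop is illustrative, not the doc-builder's actual code):

```python
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}

text = "processor = {processor_class}.from_pretrained(checkpoint)"
for placeholder, replacement in black_avoid_patterns.items():
    text = text.replace(placeholder, replacement)
print(text)  # processor = FakeProcessorClass.from_pretrained(checkpoint)
```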
from ..utils import DummyObject, requires_backends
class _a(metaclass=DummyObject):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = ['torch', 'transformers', 'onnx']
def __init__( self : str , *UpperCAmelCase : int , **UpperCAmelCase : List[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Dict , *UpperCAmelCase : Dict , **UpperCAmelCase : Union[str, Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Union[str, Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a(metaclass=DummyObject):
"""simple docstring"""
_lowerCamelCase : str = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[int] , *UpperCAmelCase : List[str] , **UpperCAmelCase : int ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : List[str] , *UpperCAmelCase : List[str] , **UpperCAmelCase : List[Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[Any] , *UpperCAmelCase : Dict , **UpperCAmelCase : Optional[Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a(metaclass=DummyObject):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = ['torch', 'transformers', 'onnx']
def __init__( self : Union[str, Any] , *UpperCAmelCase : Any , **UpperCAmelCase : Dict ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Dict , **UpperCAmelCase : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Dict , *UpperCAmelCase : List[str] , **UpperCAmelCase : List[str] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a(metaclass=DummyObject):
"""simple docstring"""
_lowerCamelCase : int = ['torch', 'transformers', 'onnx']
def __init__( self : List[str] , *UpperCAmelCase : Dict , **UpperCAmelCase : Optional[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Any , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Dict ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[Any] , *UpperCAmelCase : int , **UpperCAmelCase : Optional[int] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a(metaclass=DummyObject):
"""simple docstring"""
_lowerCamelCase : Dict = ['torch', 'transformers', 'onnx']
def __init__( self : List[str] , *UpperCAmelCase : str , **UpperCAmelCase : int ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : List[str] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : int , **UpperCAmelCase : List[Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a(metaclass=DummyObject):
"""simple docstring"""
_lowerCamelCase : int = ['torch', 'transformers', 'onnx']
def __init__( self : Tuple , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Optional[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : Dict , **UpperCAmelCase : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] ) | 86 | 0 |
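A simplified sketch of the mechanism behind these placeholders (not the transformers source, and the class name below is hypothetical): the `DummyObject` metaclass turns any attribute access into a call to `requires_backends`, which raises an informative `ImportError` naming the missing backends.

```python
def requires_backends(obj, backends):
    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
    raise ImportError(f"{name} requires the following backends: {', '.join(backends)}")


class DummyObject(type):
    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        requires_backends(cls, cls._backends)


class OnnxStableDiffusionPipeline(metaclass=DummyObject):  # hypothetical name
    _backends = ["torch", "transformers", "onnx"]


# OnnxStableDiffusionPipeline.from_pretrained("...")  # -> ImportError listing the backends
```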
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
| 321 |
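What the shape assertions verify: GLPN resizes so that both spatial dimensions become multiples of `size_divisor`, rounding down. The rule, as a sketch:

```python
size_divisor = 32
height, width = 250, 333
new_h = (height // size_divisor) * size_divisor
new_w = (width // size_divisor) * size_divisor
print(new_h, new_w)                                 # 224 320
print(new_h % size_divisor, new_w % size_divisor)   # 0 0, which is what the tests assert
```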
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
def __A ( self : Optional[Any] ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def __A ( self : Dict ):
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCAmelCase , beta_end=UpperCAmelCase )
def __A ( self : int ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase )
def __A ( self : Tuple ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCAmelCase )
def __A ( self : int ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase )
def __A ( self : Union[str, Any] ):
self.check_over_configs(thresholding=UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , )
def __A ( self : Optional[int] ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def __A ( self : Tuple ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=UpperCAmelCase )
def __A ( self : Tuple ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00_979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def __A ( self : List[Any] ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCAmelCase )
A_ = len(UpperCAmelCase )
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
A_ = self.dummy_sample_deter + 0.1
A_ = self.dummy_sample_deter - 0.1
A_ = samplea.shape[0]
A_ = torch.stack([samplea, samplea, samplea] , dim=0 )
A_ = torch.arange(UpperCAmelCase )[0:3, None].repeat(1 , UpperCAmelCase )
A_ = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
A_ = scheduler.batch_step_no_noise(UpperCAmelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
A_ = torch.sum(torch.abs(UpperCAmelCase ) )
A_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 1_153.1_833 ) < 1E-2
assert abs(result_mean.item() - 0.5_005 ) < 1E-3
def __A ( self : Tuple ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCAmelCase )
A_ = len(UpperCAmelCase )
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
A_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
A_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
A_ = pred_prev_sample
A_ = torch.sum(torch.abs(UpperCAmelCase ) )
A_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 258.9_606 ) < 1E-2
assert abs(result_mean.item() - 0.3_372 ) < 1E-3
def __A ( self : Tuple ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config(prediction_type="v_prediction" )
A_ = scheduler_class(**UpperCAmelCase )
A_ = len(UpperCAmelCase )
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
A_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
A_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
A_ = pred_prev_sample
A_ = torch.sum(torch.abs(UpperCAmelCase ) )
A_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 202.0_296 ) < 1E-2
assert abs(result_mean.item() - 0.2_631 ) < 1E-3
def __A ( self : Union[str, Any] ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCAmelCase )
A_ = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase )
A_ = scheduler.timesteps
for i, timestep in enumerate(UpperCAmelCase ):
if i == len(UpperCAmelCase ) - 1:
A_ = -1
else:
A_ = timesteps[i + 1]
A_ = scheduler.previous_timestep(UpperCAmelCase )
A_ = prev_t.item()
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps) | 86 | 0 |
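The constants asserted in the variance test above (0.0, 0.00979, 0.02) come from the "fixed_small" posterior variance beta_tilde_t = (1 - alphabar_{t-1}) / (1 - alphabar_t) * beta_t on the linear schedule. A sketch that should land near those values:

```python
import torch

num_train_timesteps, beta_start, beta_end = 1000, 0.0001, 0.02
betas = torch.linspace(beta_start, beta_end, num_train_timesteps)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)


def variance(t):
    prev = alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0)
    return (1 - prev) / (1 - alphas_cumprod[t]) * betas[t]


print(variance(0).item())    # 0.0, since alphabar_{-1} is defined as 1
print(variance(487).item())  # ~0.00979
print(variance(999).item())  # ~0.02, the ratio approaches 1 so beta_t dominates
```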
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
A_ = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download) -> None:
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(f'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.''' )
if tokenizer_name is None:
lowerCamelCase_ = TOKENIZER_CLASSES
else:
lowerCamelCase_ = {tokenizer_name: getattr(__UpperCamelCase ,tokenizer_name + 'Fast' )}
logger.info(f'''Loading tokenizer classes: {tokenizer_names}''' )
for tokenizer_name in tokenizer_names:
lowerCamelCase_ = TOKENIZER_CLASSES[tokenizer_name]
lowerCamelCase_ = True
if checkpoint_name is None:
lowerCamelCase_ = list(tokenizer_class.max_model_input_sizes.keys() )
else:
lowerCamelCase_ = [checkpoint_name]
logger.info(f'''For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}''' )
for checkpoint in checkpoint_names:
logger.info(f'''Loading {tokenizer_class.__class__.__name__} {checkpoint}''' )
# Load tokenizer
lowerCamelCase_ = tokenizer_class.from_pretrained(__UpperCamelCase ,force_download=__UpperCamelCase )
# Save fast tokenizer
logger.info(f'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''' )
# For organization names we create sub-directories
if "/" in checkpoint:
lowerCamelCase_ ,lowerCamelCase_ = checkpoint.split('/' )
lowerCamelCase_ = os.path.join(__UpperCamelCase ,__UpperCamelCase )
elif add_prefix:
lowerCamelCase_ = checkpoint
lowerCamelCase_ = dump_path
else:
lowerCamelCase_ = None
lowerCamelCase_ = dump_path
logger.info(f'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
lowerCamelCase_ = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
lowerCamelCase_ = file_path.split(__UpperCamelCase )[-1][0]
if next_char == "/":
lowerCamelCase_ = os.path.join(__UpperCamelCase ,__UpperCamelCase )
lowerCamelCase_ = None
logger.info(f'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
lowerCamelCase_ = tokenizer.save_pretrained(
__UpperCamelCase ,legacy_format=__UpperCamelCase ,filename_prefix=__UpperCamelCase )
logger.info(f'''=> File names {file_names}''' )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(__UpperCamelCase )
logger.info(f'''=> removing {file_name}''' )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
f'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
A_ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 42 |
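How the script is meant to be invoked (the file name and dump path below are made-up examples; the flags are the ones defined above):

```python
# From a shell (illustrative file name and paths):
#
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased \
#       --dump_path /tmp/fast_tokenizers
#
# Or calling the function directly with the same arguments:
#
#   convert_slow_checkpoint_to_fast("BertTokenizer", "bert-base-uncased", "/tmp/fast_tokenizers", False)
```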
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
with open(__UpperCamelCase ) as metadata_file:
A_ = json.load(__UpperCamelCase )
A_ = LukeConfig(use_entity_aware_attention=__UpperCamelCase ,**metadata["model_config"] )
# Load in the weights from the checkpoint_path
A_ = torch.load(__UpperCamelCase ,map_location="cpu" )
# Load the entity vocab file
A_ = load_entity_vocab(__UpperCamelCase )
A_ = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
A_ = AddedToken("<ent>" ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase )
A_ = AddedToken("<ent2>" ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(__UpperCamelCase )
with open(os.path.join(__UpperCamelCase ,LukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f:
json.dump(__UpperCamelCase ,__UpperCamelCase )
A_ = LukeTokenizer.from_pretrained(__UpperCamelCase )
# Initialize the embeddings of the special tokens
A_ = state_dict["embeddings.word_embeddings.weight"]
A_ = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 )
A_ = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 )
A_ = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
A_ = f'''encoder.layer.{layer_index}.attention.self.'''
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
A_ = state_dict["entity_embeddings.entity_embeddings.weight"]
A_ = entity_emb[entity_vocab["[MASK]"]]
A_ = LukeModel(config=__UpperCamelCase ).eval()
A_ , A_ = model.load_state_dict(__UpperCamelCase ,strict=__UpperCamelCase )
if not (len(__UpperCamelCase ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f'''Missing keys {", ".join(__UpperCamelCase )}. Expected only missing embeddings.position_ids''' )
if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )):
raise ValueError(
"Unexpected keys"
f''' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}''' )
# Check outputs
A_ = LukeTokenizer.from_pretrained(__UpperCamelCase ,task="entity_classification" )
A_ = (
"Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
" new world number one avoid a humiliating second- round exit at Wimbledon ."
)
A_ = (39, 42)
A_ = tokenizer(__UpperCamelCase ,entity_spans=[span] ,add_prefix_space=__UpperCamelCase ,return_tensors="pt" )
A_ = model(**__UpperCamelCase )
# Verify word hidden states
if model_size == "large":
A_ = torch.Size((1, 42, 1024) )
A_ = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
A_ = torch.Size((1, 42, 768) )
A_ = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,__UpperCamelCase ,atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
A_ = torch.Size((1, 1, 1024) )
A_ = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
A_ = torch.Size((1, 1, 768) )
A_ = torch.tensor([[0.1457, 0.1044, 0.0174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
f''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,__UpperCamelCase ,atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(__UpperCamelCase ) )
model.save_pretrained(__UpperCamelCase )
def load_entity_vocab(entity_vocab_path):
A_ = {}
with open(__UpperCamelCase ,"r" ,encoding="utf-8" ) as f:
for index, line in enumerate(__UpperCamelCase ):
A_ , A_ = line.rstrip().split("\t" )
A_ = index
return entity_vocab
if __name__ == "__main__":
__a :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
__a :Tuple = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
) | 86 | 0 |
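The TSV format `load_entity_vocab` expects: one entity per line, a tab-separated second column, with the index taken from the line number. A sketch (the entries are illustrative):

```python
sample = "[PAD]\t0\n[UNK]\t1\n[MASK]\t2\nAna Ivanovic\t43\n"

entity_vocab = {}
for index, line in enumerate(sample.splitlines()):
    title, _count = line.rstrip().split("\t")
    entity_vocab[title] = index

print(entity_vocab["[MASK]"])  # 2
```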
'''simple docstring'''
__version__ = '0.21.0'
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 342 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__a :Optional[Any] = 'true'
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
"""simple docstring"""
set_seed(42 )
A_ = RegressionModel()
A_ = deepcopy(__UpperCamelCase )
A_ = RegressionDataset(length=__UpperCamelCase )
A_ = DataLoader(__UpperCamelCase ,batch_size=__UpperCamelCase )
model.to(accelerator.device )
A_ , A_ = accelerator.prepare(__UpperCamelCase ,__UpperCamelCase )
return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
"""simple docstring"""
A_ = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" )
A_ = load_dataset("glue" ,"mrpc" ,split="validation" )
    def tokenize_function(examples):
A_ = tokenizer(examples["sentence1"] ,examples["sentence2"] ,truncation=__UpperCamelCase ,max_length=__UpperCamelCase )
return outputs
with accelerator.main_process_first():
A_ = dataset.map(
__UpperCamelCase ,batched=__UpperCamelCase ,remove_columns=["idx", "sentence1", "sentence2"] ,)
A_ = tokenized_datasets.rename_column("label" ,"labels" )
    def collate_fn(examples):
if use_longest:
return tokenizer.pad(__UpperCamelCase ,padding="longest" ,return_tensors="pt" )
return tokenizer.pad(__UpperCamelCase ,padding="max_length" ,max_length=128 ,return_tensors="pt" )
return DataLoader(__UpperCamelCase ,shuffle=__UpperCamelCase ,collate_fn=__UpperCamelCase ,batch_size=16 )
def get_mrpc_setup(dispatch_batches, split_batches):
"""simple docstring"""
A_ = Accelerator(dispatch_batches=__UpperCamelCase ,split_batches=__UpperCamelCase )
A_ = get_dataloader(__UpperCamelCase ,not dispatch_batches )
A_ = AutoModelForSequenceClassification.from_pretrained(
"hf-internal-testing/mrpc-bert-base-cased" ,return_dict=__UpperCamelCase )
A_ , A_ = accelerator.prepare(__UpperCamelCase ,__UpperCamelCase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
"""simple docstring"""
A_ = []
for batch in dataloader:
A_ , A_ = batch.values()
with torch.no_grad():
A_ = model(__UpperCamelCase )
A_ , A_ = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
A_ , A_ = [], []
for logit, targ in logits_and_targets:
logits.append(__UpperCamelCase )
targs.append(__UpperCamelCase )
A_ , A_ = torch.cat(__UpperCamelCase ), torch.cat(__UpperCamelCase )
return logits, targs
def __snake_case ( __UpperCamelCase : Accelerator ,__UpperCamelCase : Dict=82 ,__UpperCamelCase : List[Any]=False ,__UpperCamelCase : Dict=False ,__UpperCamelCase : Optional[int]=16 ):
"""simple docstring"""
A_ , A_ , A_ = get_basic_setup(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
A_ , A_ = generate_predictions(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
assert (
len(__UpperCamelCase ) == num_samples
), f'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__UpperCamelCase )}'''
def __snake_case ( __UpperCamelCase : bool = False ,__UpperCamelCase : bool = False ):
"""simple docstring"""
A_ = evaluate.load("glue" ,"mrpc" )
A_ , A_ = get_mrpc_setup(__UpperCamelCase ,__UpperCamelCase )
# First do baseline
A_ , A_ , A_ = setup["no"]
model.to(__UpperCamelCase )
model.eval()
for batch in dataloader:
batch.to(__UpperCamelCase )
with torch.inference_mode():
A_ = model(**__UpperCamelCase )
A_ = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=__UpperCamelCase ,references=batch["labels"] )
A_ = metric.compute()
# Then do distributed
A_ , A_ , A_ = setup["ddp"]
model.eval()
for batch in dataloader:
with torch.inference_mode():
A_ = model(**__UpperCamelCase )
A_ = outputs.logits.argmax(dim=-1 )
A_ = batch["labels"]
A_ , A_ = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=__UpperCamelCase ,references=__UpperCamelCase )
A_ = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] ,distributed[key] ), f'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def __snake_case ( ):
"""simple docstring"""
A_ = Accelerator(split_batches=__UpperCamelCase ,dispatch_batches=__UpperCamelCase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("**Testing gather_for_metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(__UpperCamelCase ,__UpperCamelCase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test torch metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
A_ = Accelerator(split_batches=__UpperCamelCase ,dispatch_batches=__UpperCamelCase )
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(__UpperCamelCase ,99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test last batch is not dropped when perfectly divisible**" )
A_ = Accelerator()
test_torch_metrics(__UpperCamelCase ,512 )
accelerator.state._reset_state()
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
main()
if __name__ == "__main__":
main() | 86 | 0 |
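The test above exercises Accelerator.gather_for_metrics, which gathers per-process tensors and drops the samples that were duplicated to pad the final batch, so metrics see exactly len(dataset) examples. A single-process sketch (with one process the call is effectively a pass-through):
import torch
from accelerate import Accelerator

accelerator = Accelerator()
logits = torch.randn(4, 2)
labels = torch.randint(0, 2, (4,))
# In a multi-process run this would concatenate across ranks and trim
# any duplicated tail samples before returning.
logits, labels = accelerator.gather_for_metrics((logits, labels))
assert logits.shape[0] == labels.shape[0] == 4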
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(snake_case_ )
class UpperCAmelCase_ ( snake_case_ ):
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(_lowerCAmelCase )
def __call__( self , _lowerCAmelCase , **_lowerCAmelCase ):
return super().__call__(_lowerCAmelCase , **_lowerCAmelCase )
def __UpperCAmelCase ( self , **_lowerCAmelCase ):
return {}, {}, {}
def __UpperCAmelCase ( self , _lowerCAmelCase ):
UpperCAmelCase__ : int = load_image(_lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = image.size
UpperCAmelCase__ : List[Any] = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
return model_inputs
def __UpperCAmelCase ( self , _lowerCAmelCase ):
UpperCAmelCase__ : Tuple = self.model(**_lowerCAmelCase )
return model_outputs
def __UpperCAmelCase ( self , _lowerCAmelCase ):
UpperCAmelCase__ : Tuple = model_outputs.predicted_depth
UpperCAmelCase__ : Optional[int] = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="""bicubic""" , align_corners=_lowerCAmelCase )
UpperCAmelCase__ : Any = prediction.squeeze().cpu().numpy()
UpperCAmelCase__ : List[Any] = (output * 255 / np.max(_lowerCAmelCase )).astype("""uint8""" )
UpperCAmelCase__ : Dict = Image.fromarray(_lowerCAmelCase )
UpperCAmelCase__ : Any = {}
UpperCAmelCase__ : Union[str, Any] = predicted_depth
UpperCAmelCase__ : List[Any] = depth
return output_dict
| 79 |
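A hedged usage sketch for the pipeline class above; it needs network access to download weights, and Intel/dpt-large is one known depth-estimation checkpoint, not necessarily this class's default:
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
print(result["depth"].size)             # PIL image, resized back to the input size
print(result["predicted_depth"].shape)  # raw torch tensor from the model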
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent that you run this script from the root of the repo with the command
# python utils/check_task_guides.py
__a :Optional[Any] = 'src/transformers'
__a :Tuple = 'docs/source/en/tasks'
def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : Dict ,__UpperCamelCase : int ):
"""simple docstring"""
with open(__UpperCamelCase ,"r" ,encoding="utf-8" ,newline="\n" ) as f:
A_ = f.readlines()
# Find the start prompt.
A_ = 0
while not lines[start_index].startswith(__UpperCamelCase ):
start_index += 1
start_index += 1
A_ = start_index
while not lines[end_index].startswith(__UpperCamelCase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
__a :List[str] = direct_transformers_import(TRANSFORMERS_PATH)
__a :Optional[Any] = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
__a :Optional[Any] = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def __snake_case ( __UpperCamelCase : Tuple ):
"""simple docstring"""
A_ = TASK_GUIDE_TO_MODELS[task_guide]
A_ = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(__UpperCamelCase ,set() )
A_ = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([f'''[{name}](../model_doc/{code})''' for code, name in model_names.items()] ) + "\n"
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : List[str]=False ):
"""simple docstring"""
A_ , A_ , A_ , A_ = _find_text_in_file(
filename=os.path.join(__UpperCamelCase ,__UpperCamelCase ) ,start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" ,end_prompt="<!--End of the generated tip-->" ,)
A_ = get_model_list_for_task(__UpperCamelCase )
if current_list != new_list:
if overwrite:
with open(os.path.join(__UpperCamelCase ,__UpperCamelCase ) ,"w" ,encoding="utf-8" ,newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
f'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'''
" to fix this." )
if __name__ == "__main__":
__a :int = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
__a :Optional[Any] = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite) | 86 | 0 |
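The core trick in the script above is _find_text_in_file: locate a start marker and an end marker, then splice regenerated content between them while leaving the rest of the file untouched. A generic self-contained sketch of that idiom:
def splice_between(text: str, start: str, end: str, replacement: str) -> str:
    # Keep everything up to and including the start marker, then the new
    # content, then everything from the end marker onward.
    lines = text.splitlines(keepends=True)
    i = next(n for n, line in enumerate(lines) if line.startswith(start)) + 1
    j = next(n for n, line in enumerate(lines) if line.startswith(end))
    return "".join(lines[:i]) + replacement + "".join(lines[j:])

doc = "intro\n<!--start-->\nold list\n<!--end-->\noutro\n"
print(splice_between(doc, "<!--start-->", "<!--end-->", "new list\n"))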
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A : Optional[Any] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class lowerCamelCase (snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = XGLMTokenizer
lowerCamelCase__ = XGLMTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = True
def __A ( self : Tuple ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE_ = XGLMTokenizer(__magic_name__ , keep_accents=__magic_name__ )
tokenizer.save_pretrained(self.tmpdirname )
def __A ( self : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = "<pad>"
SCREAMING_SNAKE_CASE_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )
def __A ( self : Optional[int] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(len(__magic_name__ ) , 1_008 )
def __A ( self : int ) -> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 1_008 )
def __A ( self : Dict ) -> int:
SCREAMING_SNAKE_CASE_ = XGLMTokenizer(__magic_name__ , keep_accents=__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(__magic_name__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__magic_name__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__magic_name__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_ids(__magic_name__ )
self.assertListEqual(
__magic_name__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE_ = tokenizer.convert_ids_to_tokens(__magic_name__ )
self.assertListEqual(
__magic_name__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def __A ( self : List[Any] ) -> Union[str, Any]:
return XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
def __A ( self : Union[str, Any] ) -> List[Any]:
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(__magic_name__ , f.name )
SCREAMING_SNAKE_CASE_ = XGLMTokenizer(f.name , keep_accents=__magic_name__ )
SCREAMING_SNAKE_CASE_ = pickle.dumps(__magic_name__ )
pickle.loads(__magic_name__ )
def __A ( self : str ) -> int:
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ = "I was born in 92000, and this is falsé."
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize(__magic_name__ )
SCREAMING_SNAKE_CASE_ = rust_tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
SCREAMING_SNAKE_CASE_ = rust_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ = tokenizer.encode(__magic_name__ )
SCREAMING_SNAKE_CASE_ = rust_tokenizer.encode(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
@slow
def __A ( self : Optional[int] ) -> Any:
SCREAMING_SNAKE_CASE_ = "Hello World!"
SCREAMING_SNAKE_CASE_ = [2, 31_227, 4_447, 35]
self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) )
@slow
def __A ( self : Any ) -> Tuple:
SCREAMING_SNAKE_CASE_ = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
)
# fmt: off
SCREAMING_SNAKE_CASE_ = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) )
@slow
def __A ( self : Any ) -> List[Any]:
# fmt: off
SCREAMING_SNAKE_CASE_ = {
"input_ids": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name="facebook/xglm-564M" , padding=__magic_name__ , )
| 140 |
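The assertions above repeatedly add tokenizer.fairseq_offset to raw ids: the HF tokenizer shifts every SentencePiece id up by a fixed offset so the bottom of the vocabulary can hold fairseq-style control tokens. A toy illustration of that mapping (the offset value here is illustrative):
FAIRSEQ_OFFSET = 1  # e.g. one extra control token prepended to the vocab
sp_ids = [285, 46, 10, 170, 382]               # raw SentencePiece ids
hf_ids = [i + FAIRSEQ_OFFSET for i in sp_ids]  # ids the test compares against
assert hf_ids == [286, 47, 11, 171, 383]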
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__a :Dict = logging.get_logger(__name__)
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Tuple=False ):
"""simple docstring"""
A_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A_ = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Any=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
A_ = ""
else:
A_ = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A_ = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
A_ = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
A_ = in_proj_weight[
: config.hidden_size, :
]
A_ = in_proj_bias[: config.hidden_size]
A_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A_ = in_proj_weight[
-config.hidden_size :, :
]
A_ = in_proj_bias[-config.hidden_size :]
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
A_ = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(__UpperCamelCase ,__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : List[str] ):
"""simple docstring"""
A_ = dct.pop(__UpperCamelCase )
A_ = val
def __snake_case ( ):
"""simple docstring"""
A_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Optional[int] ):
"""simple docstring"""
A_ = ViTConfig()
A_ = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
A_ = True
A_ = int(vit_name[-12:-10] )
A_ = int(vit_name[-9:-6] )
else:
A_ = 1000
A_ = "huggingface/label-files"
A_ = "imagenet-1k-id2label.json"
A_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type="dataset" ) ,"r" ) )
A_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
A_ = idalabel
A_ = {v: k for k, v in idalabel.items()}
A_ = int(vit_name[-6:-4] )
A_ = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("tiny" ):
A_ = 192
A_ = 768
A_ = 12
A_ = 3
elif vit_name[9:].startswith("small" ):
A_ = 384
A_ = 1536
A_ = 12
A_ = 6
else:
pass
else:
if vit_name[4:].startswith("small" ):
A_ = 768
A_ = 2304
A_ = 8
A_ = 8
elif vit_name[4:].startswith("base" ):
pass
elif vit_name[4:].startswith("large" ):
A_ = 1024
A_ = 4096
A_ = 24
A_ = 16
elif vit_name[4:].startswith("huge" ):
A_ = 1280
A_ = 5120
A_ = 32
A_ = 16
# load original model from timm
A_ = timm.create_model(__UpperCamelCase ,pretrained=__UpperCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
A_ = timm_model.state_dict()
if base_model:
remove_classification_head_(__UpperCamelCase )
A_ = create_rename_keys(__UpperCamelCase ,__UpperCamelCase )
for src, dest in rename_keys:
rename_key(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
read_in_q_k_v(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# load HuggingFace model
if vit_name[-5:] == "in21k":
A_ = ViTModel(__UpperCamelCase ).eval()
else:
A_ = ViTForImageClassification(__UpperCamelCase ).eval()
model.load_state_dict(__UpperCamelCase )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
A_ = DeiTImageProcessor(size=config.image_size )
else:
A_ = ViTImageProcessor(size=config.image_size )
A_ = image_processor(images=prepare_img() ,return_tensors="pt" )
A_ = encoding["pixel_values"]
A_ = model(__UpperCamelCase )
if base_model:
A_ = timm_model.forward_features(__UpperCamelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__UpperCamelCase ,outputs.pooler_output ,atol=1E-3 )
else:
A_ = timm_model(__UpperCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__UpperCamelCase ,outputs.logits ,atol=1E-3 )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__a :Optional[int] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path) | 86 | 0 |
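A standalone illustration of the fused-QKV split that read_in_q_k_v performs above: timm stores the attention input projection as one (3*hidden, hidden) matrix, while the HF model expects separate query/key/value weights sliced out in that order.
import torch

hidden = 8
in_proj_weight = torch.randn(3 * hidden, hidden)
q = in_proj_weight[:hidden, :]
k = in_proj_weight[hidden : 2 * hidden, :]
v = in_proj_weight[-hidden:, :]
# Concatenating the slices back reproduces the fused matrix exactly.
assert torch.equal(torch.cat([q, k, v], dim=0), in_proj_weight)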
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , __A , __A=7 , __A=3 , __A=18 , __A=30 , __A=400 , __A=True , __A=None , __A=True , ):
__a = size if size is not None else {"""height""": 18, """width""": 18}
__a = parent
__a = batch_size
__a = num_channels
__a = image_size
__a = min_resolution
__a = max_resolution
__a = do_resize
__a = size
__a = apply_ocr
def snake_case_ ( self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __UpperCAmelCase ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
    _lowerCamelCase = LayoutLMv3ImageProcessor if is_pytesseract_available() else None
def snake_case_ ( self ):
__a = LayoutLMvaImageProcessingTester(self )
@property
def snake_case_ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ ( self ):
__a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , """do_resize""" ) )
self.assertTrue(hasattr(__A , """size""" ) )
self.assertTrue(hasattr(__A , """apply_ocr""" ) )
def snake_case_ ( self ):
__a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
__a = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def snake_case_ ( self ):
pass
def snake_case_ ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , __A )
self.assertIsInstance(encoding.boxes , __A )
# Test batched
__a = image_processing(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def snake_case_ ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
__a = image_processing(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def snake_case_ ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
__a = image_processing(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def snake_case_ ( self ):
# with apply_OCR = True
        __a = LayoutLMv3ImageProcessor()
from datasets import load_dataset
__a = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
__a = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
__a = image_processing(__A , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__a = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
__a = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 
643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __A )
self.assertListEqual(encoding.boxes , __A )
# with apply_OCR = False
        __a = LayoutLMv3ImageProcessor(apply_ocr=__A )
__a = image_processing(__A , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 99 |
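A quick usage sketch for the processor under test; Tesseract must be installed for apply_ocr=True, while apply_ocr=False returns only pixel values, which is what the final assertions above rely on:
from PIL import Image
from transformers import LayoutLMv3ImageProcessor

processor = LayoutLMv3ImageProcessor(apply_ocr=False)
image = Image.new("RGB", (640, 480), color="white")
encoding = processor(image, return_tensors="pt")
print(encoding.pixel_values.shape)  # torch.Size([1, 3, 224, 224])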
def __snake_case ( __UpperCamelCase : int = 50 ):
"""simple docstring"""
A_ = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 ,5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(F"{solution() = }") | 86 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Optional[Any] = logging.get_logger(__name__)
a : Tuple = {
'google/vit-base-patch16-224': 'https://huggingface.co/vit-base-patch16-224/resolve/main/config.json',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class SCREAMING_SNAKE_CASE__ ( snake_case_ ):
__SCREAMING_SNAKE_CASE = 'vit'
def __init__( self : Optional[int] , a_ : str=768 , a_ : Optional[int]=12 , a_ : List[Any]=12 , a_ : Optional[int]=3_072 , a_ : Any="gelu" , a_ : Tuple=0.0 , a_ : List[str]=0.0 , a_ : List[str]=0.02 , a_ : Optional[Any]=1e-12 , a_ : Tuple=224 , a_ : int=16 , a_ : Tuple=3 , a_ : Tuple=True , a_ : Optional[Any]=16 , **a_ : List[str] , ):
"""simple docstring"""
super().__init__(**a_ )
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = qkv_bias
__snake_case = encoder_stride
class SCREAMING_SNAKE_CASE__ ( snake_case_ ):
__SCREAMING_SNAKE_CASE = version.parse("""1.11""" )
@property
def A ( self : Optional[int] ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def A ( self : Union[str, Any] ):
"""simple docstring"""
return 1e-4
| 69 |
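A minimal sketch of the config above: the defaults reproduce ViT-Base/16, and the patch grid follows directly from image_size and patch_size.
from transformers import ViTConfig

config = ViTConfig()  # hidden_size=768, 12 layers, 12 heads, 224px input, 16px patches
num_patches = (config.image_size // config.patch_size) ** 2
print(num_patches)  # 196 patches, plus one CLS token per image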
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
__a :List[str] = logging.get_logger(__name__)
@add_end_docstrings(snake_case_ )
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Any , **UpperCAmelCase : List[str] ):
super().__init__(**UpperCAmelCase )
if self.framework == "tf":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , "vision" )
self.check_model_type(UpperCAmelCase )
def __call__( self : Optional[int] , UpperCAmelCase : Union[str, "Image.Image", List[Dict[str, Any]]] , UpperCAmelCase : Union[str, List[str]] = None , **UpperCAmelCase : List[Any] , ):
if "text_queries" in kwargs:
A_ = kwargs.pop("text_queries" )
if isinstance(UpperCAmelCase , (str, Image.Image) ):
A_ = {"image": image, "candidate_labels": candidate_labels}
else:
A_ = image
A_ = super().__call__(UpperCAmelCase , **UpperCAmelCase )
return results
def __A ( self : int , **UpperCAmelCase : Tuple ):
A_ = {}
if "threshold" in kwargs:
A_ = kwargs["threshold"]
if "top_k" in kwargs:
A_ = kwargs["top_k"]
return {}, {}, postprocess_params
def __A ( self : List[str] , UpperCAmelCase : Dict ):
A_ = load_image(inputs["image"] )
A_ = inputs["candidate_labels"]
if isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = candidate_labels.split("," )
        A_ = torch.tensor([[image.height, image.width]] , dtype=torch.int32 )
for i, candidate_label in enumerate(UpperCAmelCase ):
A_ = self.tokenizer(UpperCAmelCase , return_tensors=self.framework )
A_ = self.image_processor(UpperCAmelCase , return_tensors=self.framework )
yield {
"is_last": i == len(UpperCAmelCase ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def __A ( self : str , UpperCAmelCase : int ):
A_ = model_inputs.pop("target_size" )
A_ = model_inputs.pop("candidate_label" )
A_ = model_inputs.pop("is_last" )
A_ = self.model(**UpperCAmelCase )
A_ = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
return model_outputs
def __A ( self : Dict , UpperCAmelCase : Any , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : Optional[int]=None ):
A_ = []
for model_output in model_outputs:
A_ = model_output["candidate_label"]
A_ = BaseModelOutput(UpperCAmelCase )
A_ = self.image_processor.post_process_object_detection(
outputs=UpperCAmelCase , threshold=UpperCAmelCase , target_sizes=model_output["target_size"] )[0]
for index in outputs["scores"].nonzero():
A_ = outputs["scores"][index].item()
A_ = self._get_bounding_box(outputs["boxes"][index][0] )
A_ = {"score": score, "label": label, "box": box}
results.append(UpperCAmelCase )
        A_ = sorted(UpperCAmelCase , key=lambda x : x["score"] , reverse=UpperCAmelCase )
if top_k:
A_ = results[:top_k]
return results
def __A ( self : List[str] , UpperCAmelCase : "torch.Tensor" ):
if self.framework != "pt":
raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch." )
A_ , A_ , A_ , A_ = box.int().tolist()
A_ = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox | 86 | 0 |
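A hedged usage sketch for the chunk pipeline above; it downloads real weights, and google/owlvit-base-patch32 is one known zero-shot detector rather than necessarily this class's default:
from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
)
for pred in predictions:
    print(pred["label"], round(pred["score"], 3), pred["box"])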
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __a ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase : int = DDIMPipeline
_lowerCamelCase : Union[str, Any] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
_lowerCamelCase : Any = PipelineTesterMixin.required_optional_params - {
'num_images_per_prompt',
'latents',
'callback',
'callback_steps',
}
_lowerCamelCase : List[Any] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
_lowerCamelCase : Optional[int] = False
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
        __lowercase = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
__lowercase = DDIMScheduler()
__lowercase = {"unet": unet, "scheduler": scheduler}
return components
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase=0 ) -> Tuple:
'''simple docstring'''
if str(_lowerCamelCase ).startswith("mps" ):
__lowercase = torch.manual_seed(_lowerCamelCase )
else:
__lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
__lowercase = {
"batch_size": 1,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
__lowercase = "cpu"
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
__lowercase = self.get_dummy_inputs(_lowerCamelCase )
__lowercase = pipe(**_lowerCamelCase ).images
__lowercase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
__lowercase = np.array(
[1.0_00e00, 5.7_17e-01, 4.7_17e-01, 1.0_00e00, 0.0_00e00, 1.0_00e00, 3.0_00e-04, 0.0_00e00, 9.0_00e-04] )
__lowercase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_lowerCamelCase , 1e-3 )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
super().test_save_load_local(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __a ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
__lowercase = "google/ddpm-cifar10-32"
        __lowercase = UNet2DModel.from_pretrained(_lowerCamelCase )
__lowercase = DDIMScheduler()
__lowercase = DDIMPipeline(unet=_lowerCamelCase , scheduler=_lowerCamelCase )
ddim.to(_lowerCamelCase )
ddim.set_progress_bar_config(disable=_lowerCamelCase )
__lowercase = torch.manual_seed(0 )
__lowercase = ddim(generator=_lowerCamelCase , eta=0.0 , output_type="numpy" ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowercase = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = "google/ddpm-ema-bedroom-256"
        __lowercase = UNet2DModel.from_pretrained(_lowerCamelCase )
__lowercase = DDIMScheduler.from_pretrained(_lowerCamelCase )
__lowercase = DDIMPipeline(unet=_lowerCamelCase , scheduler=_lowerCamelCase )
ddpm.to(_lowerCamelCase )
ddpm.set_progress_bar_config(disable=_lowerCamelCase )
__lowercase = torch.manual_seed(0 )
__lowercase = ddpm(generator=_lowerCamelCase , output_type="numpy" ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
__lowercase = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 118 |
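A hedged end-to-end sketch of the pipeline under test, using the same public google/ddpm-cifar10-32 weights as the slow tests above (needs network access; the low step count just keeps it quick):
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
image = pipe(generator=torch.manual_seed(0), num_inference_steps=10).images[0]
image.save("ddim_sample.png")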
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
__a :Any = logging.get_logger(__name__)
__a :int = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
__a :Tuple = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : List[Any] ,__UpperCamelCase : Dict ,__UpperCamelCase : Any ,__UpperCamelCase : Optional[int] ):
"""simple docstring"""
for attribute in key.split("." ):
A_ = getattr(__UpperCamelCase ,__UpperCamelCase )
if weight_type is not None:
A_ = getattr(__UpperCamelCase ,__UpperCamelCase ).shape
else:
A_ = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
A_ = value
elif weight_type == "weight_g":
A_ = value
elif weight_type == "weight_v":
A_ = value
elif weight_type == "bias":
A_ = value
else:
A_ = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ = []
A_ = fairseq_model.state_dict()
A_ = hf_model.feature_extractor
for name, value in fairseq_dict.items():
A_ = False
if "conv_layers" in name:
load_conv_layer(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,hf_model.config.feat_extract_norm == "group" ,)
A_ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
A_ = True
if "*" in mapped_key:
A_ = name.split(__UpperCamelCase )[0].split("." )[-2]
A_ = mapped_key.replace("*" ,__UpperCamelCase )
if "weight_g" in name:
A_ = "weight_g"
elif "weight_v" in name:
A_ = "weight_v"
elif "bias" in name and "relative_attention_bias" not in name:
A_ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
A_ = "weight"
else:
A_ = None
set_recursively(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
continue
if not is_used:
unused_weights.append(__UpperCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Dict ,__UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Optional[int] ):
"""simple docstring"""
A_ = full_name.split("conv_layers." )[-1]
A_ = name.split("." )
A_ = int(items[0] )
A_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
A_ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__UpperCamelCase )
@torch.no_grad()
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : str ,__UpperCamelCase : int=None ):
"""simple docstring"""
A_ = torch.load(__UpperCamelCase )
A_ = WavLMConfigOrig(checkpoint["cfg"] )
A_ = WavLMOrig(__UpperCamelCase )
model.load_state_dict(checkpoint["model"] )
model.eval()
if config_path is not None:
A_ = WavLMConfig.from_pretrained(__UpperCamelCase )
else:
A_ = WavLMConfig()
A_ = WavLMModel(__UpperCamelCase )
recursively_load_weights(__UpperCamelCase ,__UpperCamelCase )
hf_wavlm.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :List[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
__a :Optional[int] = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 86 | 0 |
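A standalone illustration of the dotted-key traversal that set_recursively uses above to walk from a module root to a leaf parameter (toy objects instead of the real fairseq/HF models):
import torch

def get_by_path(root, dotted: str):
    # Same walk as `for attribute in key.split("."): getattr(...)` above.
    obj = root
    for attr in dotted.split("."):
        obj = getattr(obj, attr)
    return obj

model = torch.nn.Sequential(torch.nn.Linear(2, 2))
weight = get_by_path(model, "0.weight")  # nn.Module exposes children via getattr
print(weight.shape)  # torch.Size([2, 2])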
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
_snake_case = 5_0003
_snake_case = 5_0002
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( snake_case_ , unittest.TestCase):
lowerCamelCase__ = PLBartTokenizer
lowerCamelCase__ = None
lowerCamelCase__ = False
def snake_case__ ( self):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase : str = PLBartTokenizer(__a, language_codes="base", keep_accents=__a)
tokenizer.save_pretrained(self.tmpdirname)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = PLBartTokenizer(__a, language_codes="base", keep_accents=__a)
_lowerCAmelCase : Any = tokenizer.tokenize("This is a test")
self.assertListEqual(__a, ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
_lowerCAmelCase : List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
__a, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
], )
_lowerCAmelCase : List[str] = tokenizer.convert_tokens_to_ids(__a)
self.assertListEqual(
__a, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
], )
_lowerCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(__a)
self.assertListEqual(
__a, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
], )
_lowerCAmelCase : Tuple = tokenizer.vocab_size
_lowerCAmelCase : Any = [tokenizer.convert_ids_to_tokens(__a) for x in range(end - 4, __a)]
self.assertListEqual(__a, ["__java__", "__python__", "__en_XX__", "<mask>"])
_lowerCAmelCase : Optional[int] = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
_lowerCAmelCase : List[Any] = tokenizer(__a).input_ids
self.assertEqual(
tokenizer.decode(__a, skip_special_tokens=__a, clean_up_tokenization_spaces=__a), __a, )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = PLBartTokenizer(__a, language_codes="multi", keep_accents=__a)
_lowerCAmelCase : List[str] = tokenizer.tokenize("This is a test")
self.assertListEqual(__a, ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
_lowerCAmelCase : List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
__a, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
], )
_lowerCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__a)
self.assertListEqual(
__a, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
], )
_lowerCAmelCase : str = tokenizer.convert_ids_to_tokens(__a)
self.assertListEqual(
__a, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
], )
        end = tokenizer.vocab_size
        vocab_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
        self.assertListEqual(
            vocab_tokens, ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"])
_lowerCAmelCase : Any = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
_lowerCAmelCase : List[str] = tokenizer(__a).input_ids
self.assertEqual(
tokenizer.decode(__a, skip_special_tokens=__a, clean_up_tokenization_spaces=__a), __a, )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( unittest.TestCase):
    checkpoint_name = 'uclanlp/plbart-python-en_XX'
    src_text = [
'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
]
    tgt_text = [
'Returns the maximum value of a b c.',
'Sums the values of a b c.',
]
    expected_src_tokens = [
134,
5452,
33460,
33441,
33463,
33465,
33463,
33449,
988,
20,
33456,
19,
33456,
771,
39,
4258,
889,
3318,
33441,
33463,
33465,
33463,
33449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def snake_case__ ( cls):
'''simple docstring'''
        cls.tokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX")
        cls.pad_token_id = 1
return cls
def snake_case__ ( self):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"], 5_0001)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"], 5_0002)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"], 5_0003)
def snake_case__ ( self):
'''simple docstring'''
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
def snake_case__ ( self):
'''simple docstring'''
        self.assertIn(EN_CODE, self.tokenizer.all_special_ids)
        generated_ids = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_english)
        self.assertNotIn(self.tokenizer.eos_token, result)
def snake_case__ ( self):
'''simple docstring'''
        src_text = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
self.assertIsInstance(src_text[0], __a)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], PYTHON_CODE)
        self.assertEqual(len(ids), desired_max_length)
def snake_case__ ( self):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]), [5_0004, 5_0001])
def snake_case__ ( self):
'''simple docstring'''
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
def snake_case__ ( self):
'''simple docstring'''
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE])
        self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE)
self.assertEqual(batch.decoder_input_ids[1][-1], 2)
self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE])
@require_torch
def snake_case__ ( self):
'''simple docstring'''
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt", )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
self.assertIsInstance(__a, __a)
self.assertEqual((2, 26), batch.input_ids.shape)
self.assertEqual((2, 26), batch.attention_mask.shape)
_lowerCAmelCase : Union[str, Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens, __a)
self.assertEqual(2, batch.decoder_input_ids[0, -1]) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens, [])
self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE])
def snake_case__ ( self):
'''simple docstring'''
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt")
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1], 3)
self.assertEqual(batch.decoder_input_ids.shape[1], 10)
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.tokenizer._build_translation_inputs(
"A test", return_tensors="pt", src_lang="en_XX", tgt_lang="java")
self.assertEqual(
nested_simplify(__a), {
# A, test, EOS, en_XX
"input_ids": [[150, 242, 2, 5_0003]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 5_0001,
}, )
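# Hedged recap of the translation-input convention exercised above (token ids are
# taken from the assertions in this test, not re-derived): PLBart appends the source
# language code after EOS and forces the target language code at decoding time.
# inputs = tokenizer._build_translation_inputs(
#     "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="java")
# inputs["input_ids"]            # [[150, 242, 2, 50003]] -> tokens, </s>, __en_XX__
# inputs["forced_bos_token_id"]  # 50001 -> __java__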
| 500 |
def bubble_sort(list_data: list, length: int = 0):
    """simple docstring"""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)
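# Minimal usage sketch for the recursive bubble sort above (example data assumed):
# each pass bubbles the largest unsorted element to index `length - 1`, then the
# function recurses on the shorter prefix until a pass performs no swap.
# bubble_sort([5, 2, 9, 1])  # -> [1, 2, 5, 9] (sorts in place and returns the list)
# bubble_sort([1])           # -> [1] (no swap on the first pass, recursion stops)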
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class A_(snake_case_ ):
"""simple docstring"""
a_ : Dict = 'open-llama'
def __init__( self , A=10_0000 , A=4096 , A=1_1008 , A=32 , A=32 , A="silu" , A=2048 , A=0.0_2 , A=1E-6 , A=True , A=0 , A=1 , A=2 , A=False , A=True , A=0.1 , A=0.1 , A=True , A=True , A=None , **A , ):
_lowerCamelCase : Optional[int] = vocab_size
_lowerCamelCase : List[str] = max_position_embeddings
_lowerCamelCase : List[str] = hidden_size
_lowerCamelCase : Dict = intermediate_size
_lowerCamelCase : Dict = num_hidden_layers
_lowerCamelCase : Union[str, Any] = num_attention_heads
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Optional[int] = initializer_range
_lowerCamelCase : Union[str, Any] = rms_norm_eps
_lowerCamelCase : Union[str, Any] = use_cache
_lowerCamelCase : Dict = kwargs.pop(
'use_memorry_efficient_attention' , A )
_lowerCamelCase : int = hidden_dropout_prob
_lowerCamelCase : Tuple = attention_dropout_prob
_lowerCamelCase : Tuple = use_stable_embedding
_lowerCamelCase : List[Any] = shared_input_output_embedding
_lowerCamelCase : Optional[int] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=A , bos_token_id=A , eos_token_id=A , tie_word_embeddings=A , **A , )
    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                F"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(F"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
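# Hedged sketch of a value the validator above accepts; the field names `type` and
# `factor` follow the `.get` calls in `_rope_scaling_validation`, and the config
# class name is an assumption (identifiers in this snippet are mangled):
# config = OpenLlamaConfig(rope_scaling={"type": "dynamic", "factor": 2.0})
# A factor of 2.0 passes; {"type": "cubic", ...} or any factor <= 1.0 raises ValueError.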
| 437 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : List[str] ):
A_ = torch.nn.Linear(10 , 10 )
A_ = torch.optim.SGD(model.parameters() , 0.1 )
A_ = Accelerator()
A_ = accelerator.prepare(UpperCAmelCase )
try:
pickle.loads(pickle.dumps(UpperCAmelCase ) )
except Exception as e:
self.fail(f'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state() | 86 | 0 |
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class __UpperCamelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=0 ) -> str:
a__ = 1.0 if scale is None else scale
a__ = 0.0 if loc is None else loc
super().__init__(SCREAMING_SNAKE_CASE , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=SCREAMING_SNAKE_CASE )] )
@property
def _UpperCAmelCase ( self ) -> List[Any]:
return self.base_dist.mean * self.scale + self.loc
@property
def _UpperCAmelCase ( self ) -> Optional[Any]:
return self.base_dist.variance * self.scale**2
@property
def _UpperCAmelCase ( self ) -> Tuple:
return self.variance.sqrt()
class __UpperCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict:
super().__init__(**SCREAMING_SNAKE_CASE )
a__ = args_dim
a__ = nn.ModuleList([nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for dim in args_dim.values()] )
a__ = domain_map
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[str]:
a__ = [proj(SCREAMING_SNAKE_CASE ) for proj in self.proj]
return self.domain_map(*SCREAMING_SNAKE_CASE )
class __UpperCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE ) -> Any:
super().__init__()
a__ = function
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE ) -> str:
return self.function(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE )
class __UpperCamelCase :
"""simple docstring"""
_lowercase : type
_lowercase : int
_lowercase : Dict[str, int]
def __init__( self , SCREAMING_SNAKE_CASE = 1 ) -> Any:
a__ = dim
a__ = {k: dim * self.args_dim[k] for k in self.args_dim}
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int:
if self.dim == 1:
return self.distribution_class(*SCREAMING_SNAKE_CASE )
else:
return Independent(self.distribution_class(*SCREAMING_SNAKE_CASE ) , 1 )
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , ) -> int:
a__ = self._base_distribution(SCREAMING_SNAKE_CASE )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(SCREAMING_SNAKE_CASE , loc=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , event_dim=self.event_dim )
@property
def _UpperCAmelCase ( self ) -> Optional[Any]:
return () if self.dim == 1 else (self.dim,)
@property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return len(self.event_shape )
@property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return 0.0
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
return ParameterProjection(
in_features=SCREAMING_SNAKE_CASE , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def _UpperCAmelCase ( self , *SCREAMING_SNAKE_CASE ) -> str:
raise NotImplementedError()
@staticmethod
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
return (x + torch.sqrt(torch.square(SCREAMING_SNAKE_CASE ) + 4.0 )) / 2.0
class __UpperCamelCase ( snake_case_ ):
"""simple docstring"""
_lowercase : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
_lowercase : type = StudentT
@classmethod
def _UpperCAmelCase ( cls , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
a__ = cls.squareplus(SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps )
a__ = 2.0 + cls.squareplus(SCREAMING_SNAKE_CASE )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class __UpperCamelCase ( snake_case_ ):
"""simple docstring"""
_lowercase : Dict[str, int] = {"loc": 1, "scale": 1}
_lowercase : type = Normal
@classmethod
def _UpperCAmelCase ( cls , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
a__ = cls.squareplus(SCREAMING_SNAKE_CASE ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class __UpperCamelCase ( snake_case_ ):
"""simple docstring"""
_lowercase : Dict[str, int] = {"total_count": 1, "logits": 1}
_lowercase : type = NegativeBinomial
@classmethod
def _UpperCAmelCase ( cls , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
a__ = cls.squareplus(SCREAMING_SNAKE_CASE )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[Any]:
a__ , a__ = distr_args
if self.dim == 1:
return self.distribution_class(total_count=SCREAMING_SNAKE_CASE , logits=SCREAMING_SNAKE_CASE )
else:
return Independent(self.distribution_class(total_count=SCREAMING_SNAKE_CASE , logits=SCREAMING_SNAKE_CASE ) , 1 )
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None ) -> List[Any]:
a__ , a__ = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
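# Hedged wiring sketch for the distribution heads above; the concrete names
# (StudentTOutput, get_parameter_projection, ParameterProjection) are assumptions
# borrowed from the un-obfuscated original, since this snippet's identifiers are mangled:
# head = StudentTOutput(dim=1)                        # args_dim {"df": 1, "loc": 1, "scale": 1}
# proj = head.get_parameter_projection(in_features=32)
# df, loc, scale = proj(torch.randn(8, 32))           # domain_map squeezes each to shape (8,)
# distr = head.distribution((df, loc, scale))         # a StudentT distribution over the batch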
| 194 |
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__a :List[str] = logging.get_logger(__name__)
__a :Optional[int] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
__a :Any = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """simple docstring"""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''')
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
"""simple docstring"""
A_ = []
A_ = fairseq_model.state_dict()
A_ = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
A_ = None
for name, value in fairseq_dict.items():
A_ = False
if "conv_layers" in name:
load_conv_layer(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,hf_model.config.feat_extract_norm == "group" ,)
A_ = True
elif name.split("." )[0] == "proj":
A_ = fairseq_model.proj
A_ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
A_ = True
if "*" in mapped_key:
A_ = name.split(__UpperCamelCase )[0].split("." )[-2]
A_ = mapped_key.replace("*" ,__UpperCamelCase )
if "weight_g" in name:
A_ = "weight_g"
elif "weight_v" in name:
A_ = "weight_v"
elif "bias" in name:
A_ = "bias"
elif "weight" in name:
A_ = "weight"
else:
A_ = None
set_recursively(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
continue
if not is_used:
unused_weights.append(__UpperCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """simple docstring"""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
A_ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__UpperCamelCase )
def make_linear_from_emb(emb):
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    """simple docstring"""
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]
        num_words = len(words)
        vocab_dict = {
            "<s>": 0,
            "<pad>": 1,
            "</s>": 2,
            "<unk>": 3,
        }
        vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
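# Worked sketch of the fairseq dict format consumed above (file content assumed):
# each line is "<token> <count>"; for a file containing "hello 42\nworld 7\n" the
# function returns
# {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5}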
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path, pytorch_dump_folder_path, dict_path, encoder_config_path, decoder_config_path, vocab_size, num_decoder_layers):
    """simple docstring"""
A_ = WavaVecaConfig.from_pretrained(__UpperCamelCase )
A_ = SpeechaTextaConfig.from_pretrained(
__UpperCamelCase ,vocab_size=__UpperCamelCase ,decoder_layers=__UpperCamelCase ,do_stable_layer_norm=__UpperCamelCase )
A_ = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=1_6000 ,padding_value=0 ,do_normalize=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,)
A_ , A_ , A_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
A_ = model[0].eval()
# set weights for wav2vec2 encoder
A_ = WavaVecaModel(__UpperCamelCase )
A_ = recursively_load_weights_wavaveca(model.encoder ,__UpperCamelCase )
A_ = SpeechaTextaForCausalLM(__UpperCamelCase )
A_ , A_ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() ,strict=__UpperCamelCase )
# set output linear layer
unexpected_keys.remove("embed_out" )
A_ = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
A_ = SpeechEncoderDecoderModel(encoder=__UpperCamelCase ,decoder=__UpperCamelCase )
A_ = False
# add projection layer
A_ = nn.Parameter(projection_layer.weight )
A_ = nn.Parameter(projection_layer.bias )
A_ = create_vocab_dict(__UpperCamelCase )
with open(os.path.join(__UpperCamelCase ,"vocab.json" ) ,"w" ) as fp:
json.dump(__UpperCamelCase ,__UpperCamelCase )
A_ = SpeechaTextaTokenizer(os.path.join(__UpperCamelCase ,"vocab.json" ) )
tokenizer.save_pretrained(__UpperCamelCase )
A_ = hf_wavavec.config.to_dict()
A_ = tokenizer.pad_token_id
A_ = tokenizer.bos_token_id
A_ = tokenizer.eos_token_id
A_ = "speech_to_text_2"
A_ = "wav2vec2"
A_ = SpeechEncoderDecoderConfig.from_dict(__UpperCamelCase )
hf_wavavec.save_pretrained(__UpperCamelCase )
feature_extractor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :int = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_0224, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
__a :Tuple = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
) | 86 | 0 |
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    '''simple docstring'''
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")
    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    '''simple docstring'''
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0")
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}
    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{solution()}")
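# Worked check for digit_factorial_sum above: 145 -> 1! + 4! + 5! = 1 + 24 + 120 = 145,
# a factorion that maps to itself (chain length 1), while 169 -> 363601 -> 1454 -> 169
# forms the known loop of three distinct terms used as the Project Euler 74 example.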
| 321 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__a :str = logging.get_logger(__name__)
__a :Any = Dict[str, Any]
__a :int = List[Prediction]
@add_end_docstrings(snake_case_ )
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Tuple , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[Any] ):
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
if self.framework == "tf":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , "vision" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def __A ( self : str , **UpperCAmelCase : str ):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs
def __call__( self : Union[str, Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Optional[Any] ):
return super().__call__(*UpperCAmelCase , **UpperCAmelCase )
def __A ( self : str , UpperCAmelCase : Any ):
A_ = load_image(UpperCAmelCase )
A_ = torch.IntTensor([[image.height, image.width]] )
A_ = self.image_processor(images=[image] , return_tensors="pt" )
if self.tokenizer is not None:
A_ = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" )
A_ = target_size
return inputs
def __A ( self : Optional[Any] , UpperCAmelCase : Optional[int] ):
A_ = model_inputs.pop("target_size" )
A_ = self.model(**UpperCAmelCase )
A_ = outputs.__class__({"target_size": target_size, **outputs} )
if self.tokenizer is not None:
A_ = model_inputs["bbox"]
return model_outputs
def __A ( self : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any]=0.9 ):
A_ = model_outputs["target_size"]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
A_ , A_ = target_size[0].tolist()
def unnormalize(UpperCAmelCase : Any ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
] ) )
A_ , A_ = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
A_ = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
A_ = [unnormalize(UpperCAmelCase ) for bbox in model_outputs["bbox"].squeeze(0 )]
A_ = ["score", "label", "box"]
A_ = [dict(zip(UpperCAmelCase , UpperCAmelCase ) ) for vals in zip(scores.tolist() , UpperCAmelCase , UpperCAmelCase ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
A_ = self.image_processor.post_process_object_detection(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
A_ = raw_annotations[0]
A_ = raw_annotation["scores"]
A_ = raw_annotation["labels"]
A_ = raw_annotation["boxes"]
A_ = scores.tolist()
A_ = [self.model.config.idalabel[label.item()] for label in labels]
A_ = [self._get_bounding_box(UpperCAmelCase ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
A_ = ["score", "label", "box"]
A_ = [
dict(zip(UpperCAmelCase , UpperCAmelCase ) )
for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] )
]
return annotation
def __A ( self : Tuple , UpperCAmelCase : "torch.Tensor" ):
if self.framework != "pt":
raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." )
A_ , A_ , A_ , A_ = box.int().tolist()
A_ = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox | 86 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = LEDTokenizer
SCREAMING_SNAKE_CASE_ = LEDTokenizerFast
SCREAMING_SNAKE_CASE_ = True
def UpperCamelCase( self ) -> str:
'''simple docstring'''
super().setUp()
lowerCamelCase_ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
lowerCamelCase_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
lowerCamelCase_ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
lowerCamelCase_ = {'unk_token': '<unk>'}
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(SCREAMING_SNAKE_CASE_ ) )
def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def UpperCamelCase( self ) -> str:
'''simple docstring'''
return LEDTokenizer.from_pretrained('allenai/led-base-16384' )
@cached_property
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
return LEDTokenizerFast.from_pretrained('allenai/led-base-16384' )
@require_torch
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowerCamelCase_ = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , max_length=len(SCREAMING_SNAKE_CASE_ ) , padding=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
lowerCamelCase_ = batch.input_ids.tolist()[0]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@require_torch
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
self.assertIn('input_ids' , SCREAMING_SNAKE_CASE_ )
self.assertIn('attention_mask' , SCREAMING_SNAKE_CASE_ )
self.assertNotIn('labels' , SCREAMING_SNAKE_CASE_ )
self.assertNotIn('decoder_attention_mask' , SCREAMING_SNAKE_CASE_ )
@require_torch
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase_ = tokenizer(text_target=SCREAMING_SNAKE_CASE_ , max_length=32 , padding='max_length' , return_tensors='pt' )
self.assertEqual(32 , targets['input_ids'].shape[1] )
@require_torch
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase_ = tokenizer(
['I am a small frog' * 1024, 'I am a small frog'] , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
        src_text = ['A long paragraph for summarization.']
        tgt_text = [
            'Summary of the text.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors='pt')
            targets = tokenizer(text_target=tgt_text, return_tensors='pt')
            input_ids = inputs['input_ids']
            labels = targets['input_ids']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            text = ['Summary of the text.', 'Another summary.']
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(text, padding=False)
            encoded_output['global_attention_mask'] = [[0] * len(x) for x in encoded_output['input_ids']]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs['global_attention_mask'], expected_global_attention_mask)
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
pass
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = 'A, <mask> AllenNLP sentence.'
lowerCamelCase_ = tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ )
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
SCREAMING_SNAKE_CASE_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
SCREAMING_SNAKE_CASE_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
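# Hedged recap of the padding behaviour tested above: `tokenizer.pad` extends
# `global_attention_mask` with -1 for padded positions, so a 5-token entry padded
# to the 7-token batch length becomes [0, 0, 0, 0, 0, -1, -1].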
| 42 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    """simple docstring"""
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
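# Worked example of the 32-multiple snapping above (image size assumed): a 511x765
# input maps to w = 511 - 511 % 32 = 480 and h = 765 - 765 % 32 = 736, after which
# pixel values in [0, 255] are rescaled to the [-1.0, 1.0] range the pipeline expects.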
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase : VQModel , UpperCAmelCase : UNetaDModel , UpperCAmelCase : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ):
super().__init__()
self.register_modules(vqvae=UpperCAmelCase , unet=UpperCAmelCase , scheduler=UpperCAmelCase )
@torch.no_grad()
def __call__( self : int , UpperCAmelCase : Union[torch.Tensor, PIL.Image.Image] = None , UpperCAmelCase : Optional[int] = 1 , UpperCAmelCase : Optional[int] = 100 , UpperCAmelCase : Optional[float] = 0.0 , UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase : Optional[str] = "pil" , UpperCAmelCase : bool = True , ):
if isinstance(UpperCAmelCase , PIL.Image.Image ):
A_ = 1
elif isinstance(UpperCAmelCase , torch.Tensor ):
A_ = image.shape[0]
else:
raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(UpperCAmelCase )}''' )
if isinstance(UpperCAmelCase , PIL.Image.Image ):
A_ = preprocess(UpperCAmelCase )
A_ , A_ = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
A_ = (batch_size, self.unet.config.in_channels // 2, height, width)
A_ = next(self.unet.parameters() ).dtype
A_ = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=self.device , dtype=UpperCAmelCase )
A_ = image.to(device=self.device , dtype=UpperCAmelCase )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(UpperCAmelCase , device=self.device )
A_ = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
A_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
A_ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
A_ = {}
if accepts_eta:
A_ = eta
for t in self.progress_bar(UpperCAmelCase ):
# concat latents and low resolution image in the channel dimension.
A_ = torch.cat([latents, image] , dim=1 )
A_ = self.scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase )
# predict the noise residual
A_ = self.unet(UpperCAmelCase , UpperCAmelCase ).sample
# compute the previous noisy sample x_t -> x_t-1
A_ = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
# decode the image latents with the VQVAE
A_ = self.vqvae.decode(UpperCAmelCase ).sample
A_ = torch.clamp(UpperCAmelCase , -1.0 , 1.0 )
A_ = image / 2 + 0.5
A_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
A_ = self.numpy_to_pil(UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase ) | 86 | 0 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def euler_modified(ode_func: Callable, ya: float, xa: float, step_size: float, x_end: float) -> np.ndarray:
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        y_pred = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_pred))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
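# Hedged numeric check for euler_modified above (one hand-computed step): for
# y' = y with y(0) = 1 and step_size 0.1, the predictor gives 1 + 0.1 * 1 = 1.1 and
# the trapezoidal corrector gives 1 + 0.05 * (1 + 1.1) = 1.105, close to e**0.1 = 1.10517...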
| 342 |
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]


def next_number(number: int) -> int:
    """simple docstring"""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True
CHAINS[57] = False


def chain(number: int) -> bool:
    """simple docstring"""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain


def solution(number: int = 10_000_000) -> int:
    """simple docstring"""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)
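# Worked check of next_number above: 86 -> 8**2 + 6**2 = 100 -> 1**2 = 1 (a chain
# ending at 1), while 85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89
# re-enters the 89 loop, so solution() counts 85 among the False entries.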
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{solution() = }") | 86 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Optional[int] = {
'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class UpperCAmelCase_ ( snake_case_ ):
__lowerCamelCase = 'vit_mae'
def __init__( self , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0_2 , _lowerCAmelCase=1e-12 , _lowerCAmelCase=224 , _lowerCAmelCase=16 , _lowerCAmelCase=3 , _lowerCAmelCase=True , _lowerCAmelCase=16 , _lowerCAmelCase=512 , _lowerCAmelCase=8 , _lowerCAmelCase=2048 , _lowerCAmelCase=0.7_5 , _lowerCAmelCase=False , **_lowerCAmelCase , ):
super().__init__(**_lowerCAmelCase )
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : Optional[Any] = num_hidden_layers
UpperCAmelCase__ : Tuple = num_attention_heads
UpperCAmelCase__ : Tuple = intermediate_size
UpperCAmelCase__ : List[str] = hidden_act
UpperCAmelCase__ : Any = hidden_dropout_prob
UpperCAmelCase__ : int = attention_probs_dropout_prob
UpperCAmelCase__ : Dict = initializer_range
UpperCAmelCase__ : str = layer_norm_eps
UpperCAmelCase__ : Union[str, Any] = image_size
UpperCAmelCase__ : Dict = patch_size
UpperCAmelCase__ : Union[str, Any] = num_channels
UpperCAmelCase__ : List[str] = qkv_bias
UpperCAmelCase__ : Dict = decoder_num_attention_heads
UpperCAmelCase__ : str = decoder_hidden_size
UpperCAmelCase__ : List[str] = decoder_num_hidden_layers
UpperCAmelCase__ : int = decoder_intermediate_size
UpperCAmelCase__ : str = mask_ratio
UpperCAmelCase__ : int = norm_pix_loss
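# Hedged sizing note for the defaults above: a 224x224 image with patch_size 16
# yields (224 / 16) ** 2 = 196 patches, and mask_ratio 0.75 hides 147 of them,
# leaving 49 visible patches for the encoder during MAE pretraining.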
| 79 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__a :List[Any] = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Any = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Dict = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
__a :str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 86 | 0 |
UNIT_SYMBOL = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 12,
'Pm': 15,
'Em': 18,
'Zm': 21,
'Ym': 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            F"Invalid 'from_type' value: {from_type!r}.\n"
            F"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            F"Invalid 'to_type' value: {to_type!r}.\n"
            F"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
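# Worked example for length_conversion above: "kilometer" -> "meter" maps to
# exponents 3 and 0, so the factor is 10 ** 3 and
# length_conversion(4, "kilometer", "meter") == 4000.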
| 140 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
__a :List[Any] = get_logger()
__a :Optional[dict] = None
class _a ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
"""simple docstring"""
def __init__( self : str , UpperCAmelCase : int=None , UpperCAmelCase : List[str]=None , **UpperCAmelCase : List[Any] ):
super().__init__(features=UpperCAmelCase )
import jax
from jaxlib.xla_client import Device
if isinstance(UpperCAmelCase , UpperCAmelCase ):
raise ValueError(
f'''Expected {device} to be a `str` not {type(UpperCAmelCase )}, as `jaxlib.xla_extension.Device` '''
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
A_ = device if isinstance(UpperCAmelCase , UpperCAmelCase ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
A_ = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
f'''device: {str(jax.devices()[0] )}.''' )
A_ = str(jax.devices()[0] )
A_ = jnp_array_kwargs
@staticmethod
def __A ( ):
import jax
return {str(UpperCAmelCase ): device for device in jax.devices()}
def __A ( self : Optional[int] , UpperCAmelCase : int ):
import jax
import jax.numpy as jnp
if isinstance(UpperCAmelCase , UpperCAmelCase ) and column:
if all(
isinstance(UpperCAmelCase , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(UpperCAmelCase , axis=0 )
return column
def __A ( self : List[str] , UpperCAmelCase : str ):
import jax
import jax.numpy as jnp
if isinstance(UpperCAmelCase , (str, bytes, type(UpperCAmelCase )) ):
return value
elif isinstance(UpperCAmelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(UpperCAmelCase , PIL.Image.Image ):
A_ = np.asarray(UpperCAmelCase )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
A_ = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(UpperCAmelCase , **{**default_dtype, **self.jnp_array_kwargs} )
def __A ( self : Any , UpperCAmelCase : Dict ):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(UpperCAmelCase , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(UpperCAmelCase , "__array__" ) and not isinstance(UpperCAmelCase , jax.Array ):
A_ = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(UpperCAmelCase , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(UpperCAmelCase ) for substruct in data_struct] )
elif isinstance(UpperCAmelCase , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(UpperCAmelCase ) for substruct in data_struct] )
return self._tensorize(UpperCAmelCase )
def __A ( self : Tuple , UpperCAmelCase : dict ):
return map_nested(self._recursive_tensorize , UpperCAmelCase , map_list=UpperCAmelCase )
def __A ( self : Dict , UpperCAmelCase : pa.Table ):
A_ = self.numpy_arrow_extractor().extract_row(UpperCAmelCase )
A_ = self.python_features_decoder.decode_row(UpperCAmelCase )
return self.recursive_tensorize(UpperCAmelCase )
def __A ( self : Any , UpperCAmelCase : pa.Table ):
A_ = self.numpy_arrow_extractor().extract_column(UpperCAmelCase )
A_ = self.python_features_decoder.decode_column(UpperCAmelCase , pa_table.column_names[0] )
A_ = self.recursive_tensorize(UpperCAmelCase )
A_ = self._consolidate(UpperCAmelCase )
return column
def __A ( self : Dict , UpperCAmelCase : pa.Table ):
A_ = self.numpy_arrow_extractor().extract_batch(UpperCAmelCase )
A_ = self.python_features_decoder.decode_batch(UpperCAmelCase )
A_ = self.recursive_tensorize(UpperCAmelCase )
for column_name in batch:
A_ = self._consolidate(batch[column_name] )
return batch | 86 | 0 |
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
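# Worked example for the character n-grams above: create_ngram("abcd", 2) yields
# ["ab", "bc", "cd"]; len("abcd") - 2 + 1 = 3 sliding windows of width 2.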
if __name__ == "__main__":
from doctest import testmod
testmod()
| 99 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
__a :Any = logging.getLogger(__name__)
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str=None ):
super().__init__(
UpperCAmelCase , question_encoder_tokenizer=UpperCAmelCase , generator_tokenizer=UpperCAmelCase , index=UpperCAmelCase , init_retrieval=UpperCAmelCase , )
A_ = None
def __A ( self : Dict , UpperCAmelCase : int ):
logger.info("initializing retrieval" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("dist initialized" )
# needs to be set manually
A_ = self._infer_socket_ifname()
# avoid clash with the NCCL port
A_ = str(distributed_port + 1 )
A_ = dist.new_group(ranks=UpperCAmelCase , backend="gloo" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("dist not initialized / main" )
self.index.init_index()
# all processes wait untill the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def __A ( self : List[str] ):
return dist.get_rank(group=self.process_group ) == 0
def __A ( self : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict=torch.floataa ):
A_ = torch.empty(UpperCAmelCase , dtype=UpperCAmelCase )
dist.scatter(UpperCAmelCase , src=0 , scatter_list=UpperCAmelCase , group=self.process_group )
return target_tensor
def __A ( self : Any ):
A_ = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
A_ = next((addr for addr in addrs if addr.startswith("e" )) , UpperCAmelCase )
return ifname
def __A ( self : Tuple , UpperCAmelCase : np.ndarray , UpperCAmelCase : int ):
# single GPU training
if not dist.is_initialized():
A_ , A_ = self._main_retrieve(UpperCAmelCase , UpperCAmelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(UpperCAmelCase )
# distributed training
A_ = dist.get_world_size(group=self.process_group )
# gather logic
A_ = None
if self._is_main():
A_ = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(UpperCAmelCase )]
dist.gather(torch.tensor(UpperCAmelCase ) , dst=0 , gather_list=UpperCAmelCase , group=self.process_group )
# scatter logic
A_ = question_hidden_states.shape[0]
A_ = []
A_ = []
if self._is_main():
assert len(UpperCAmelCase ) == world_size
A_ , A_ = self._main_retrieve(torch.cat(UpperCAmelCase ).numpy() , UpperCAmelCase )
A_ , A_ = torch.tensor(UpperCAmelCase ), torch.tensor(UpperCAmelCase )
A_ = self._chunk_tensor(UpperCAmelCase , UpperCAmelCase )
A_ = self._chunk_tensor(UpperCAmelCase , UpperCAmelCase )
A_ = self._scattered(UpperCAmelCase , [n_queries, n_docs] , target_type=torch.intaa )
A_ = self._scattered(UpperCAmelCase , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(UpperCAmelCase ) | 86 | 0 |
'''simple docstring'''
from __future__ import annotations
a : List[Any] = list[list[int]]
# assigning initial values to the grid
a : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
a : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def __UpperCAmelCase ( _UpperCAmelCase : Matrix , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> bool:
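# a placement is safe only if n is absent from the row, the column, and the 3x3 box containing (row, column)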
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def __UpperCAmelCase ( _UpperCAmelCase : Matrix ) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def __UpperCAmelCase ( _UpperCAmelCase : Matrix ) -> Matrix | None:
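# backtracking search: try digits 1-9 in the first empty cell and reset the cell to 0 when recursion fails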
if location := find_empty_location(__UpperCamelCase ):
__snake_case , __snake_case = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
__snake_case = digit
if sudoku(__UpperCamelCase ) is not None:
return grid
__snake_case = 0
return None
def __UpperCAmelCase ( _UpperCAmelCase : Matrix ) -> None:
for row in grid:
for cell in row:
print(__UpperCamelCase , end=" " )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
a : int = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
| 69 |
from jiwer import compute_measures
import datasets
__a :List[Any] = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
__a :Union[str, Any] = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
__a :str = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def __A ( self : Any ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] , )
def __A ( self : Dict , UpperCAmelCase : Dict=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : str=False ):
if concatenate_texts:
return compute_measures(UpperCAmelCase , UpperCAmelCase )["wer"]
else:
A_ = 0
A_ = 0
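# accumulate error and reference-word counts across all pairs so the result is a corpus-level WER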
for prediction, reference in zip(UpperCAmelCase , UpperCAmelCase ):
A_ = compute_measures(UpperCAmelCase , UpperCAmelCase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total | 86 | 0 |
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2 as cva
_lowercase = ''
_lowercase = ''
_lowercase = ''
_lowercase = 1 # (0 is vertical, 1 is horizontal)
def lowerCAmelCase__ ( ) ->int:
__lowercase , __lowercase = get_dataset(__UpperCamelCase , __UpperCamelCase )
print("Processing..." )
__lowercase , __lowercase , __lowercase = update_image_and_anno(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
for index, image in enumerate(__UpperCamelCase ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__lowercase = random_chars(3_2 )
__lowercase = paths[index].split(os.sep )[-1].rsplit("." , 1 )[0]
__lowercase = F'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
cva.imwrite(F'''/{file_root}.jpg''' , __UpperCamelCase , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
print(F'''Success {index+1}/{len(__UpperCamelCase )} with {file_name}''' )
__lowercase = []
for anno in new_annos[index]:
__lowercase = F'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
annos_list.append(__UpperCamelCase )
with open(F'''/{file_root}.txt''' , "w" ) as outfile:
outfile.write("\n".join(line for line in annos_list ) )
def lowerCAmelCase__ ( __magic_name__ , __magic_name__ ) ->List[Any]:
__lowercase = []
__lowercase = []
for label_file in glob.glob(os.path.join(__UpperCamelCase , "*.txt" ) ):
__lowercase = label_file.split(os.sep )[-1].rsplit("." , 1 )[0]
with open(__UpperCamelCase ) as in_file:
__lowercase = in_file.readlines()
__lowercase = os.path.join(__UpperCamelCase , F'''{label_name}.jpg''' )
__lowercase = []
for obj_list in obj_lists:
__lowercase = obj_list.rstrip("\n" ).split(" " )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(__UpperCamelCase )
labels.append(__UpperCamelCase )
return img_paths, labels
def lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ = 1 ) ->Any:
__lowercase = []
__lowercase = []
__lowercase = []
for idx in range(len(__UpperCamelCase ) ):
__lowercase = []
__lowercase = img_list[idx]
path_list.append(__UpperCamelCase )
__lowercase = anno_list[idx]
__lowercase = cva.imread(__UpperCamelCase )
if flip_type == 1:
__lowercase = cva.flip(__UpperCamelCase , __UpperCamelCase )
for bbox in img_annos:
__lowercase = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
__lowercase = cva.flip(__UpperCamelCase , __UpperCamelCase )
for bbox in img_annos:
__lowercase = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(__UpperCamelCase )
new_imgs_list.append(__UpperCamelCase )
return new_imgs_list, new_annos_lists, path_list
def lowerCAmelCase__ ( __magic_name__ = 3_2 ) ->Dict:
assert number_char > 1, "The number of characters should be greater than 1"
__lowercase = ascii_lowercase + digits
return "".join(random.choice(__UpperCamelCase ) for _ in range(__UpperCamelCase ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 118 |
class _a :
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Dict ):
A_ = None
A_ = None
A_ = graph
self._normalize_graph(UpperCAmelCase , UpperCAmelCase )
A_ = len(UpperCAmelCase )
A_ = None
def __A ( self : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Tuple ):
if isinstance(sources , int ):
A_ = [sources]
if isinstance(sinks , int ):
A_ = [sinks]
if len(UpperCAmelCase ) == 0 or len(UpperCAmelCase ) == 0:
return
A_ = sources[0]
A_ = sinks[0]
# make a fake vertex if there is more
# than one source or sink
if len(UpperCAmelCase ) > 1 or len(UpperCAmelCase ) > 1:
A_ = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
A_ = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
A_ = max_input_flow
A_ = 0
A_ = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
A_ = max_input_flow
A_ = size - 1
def __A ( self : str ):
if self.maximum_flow_algorithm is None:
raise Exception("You need to set maximum flow algorithm before." )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def __A ( self : Tuple , UpperCAmelCase : List[Any] ):
A_ = algorithm(self )
class _a :
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : List[str] ):
A_ = flow_network
A_ = flow_network.verticesCount
A_ = flow_network.sourceIndex
A_ = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
A_ = flow_network.graph
A_ = False
def __A ( self : Optional[int] ):
if not self.executed:
self._algorithm()
A_ = True
def __A ( self : Dict ):
pass
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase : List[Any] ):
super().__init__(UpperCAmelCase )
# use this to save your result
A_ = -1
def __A ( self : Tuple ):
if not self.executed:
raise Exception("You should execute algorithm before using its result!" )
return self.maximum_flow
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : Union[str, Any] ):
super().__init__(UpperCAmelCase )
A_ = [[0] * self.verticies_count for i in range(self.verticies_count )]
A_ = [0] * self.verticies_count
A_ = [0] * self.verticies_count
def __A ( self : List[str] ):
A_ = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
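# the loop above saturates every edge leaving the source, giving its neighbours their initial excess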
# Relabel-to-front selection rule
A_ = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
A_ = 0
while i < len(UpperCAmelCase ):
A_ = vertices_list[i]
A_ = self.heights[vertex_index]
self.process_vertex(UpperCAmelCase )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(UpperCAmelCase ) )
A_ = 0
else:
i += 1
A_ = sum(self.preflow[self.source_index] )
def __A ( self : List[str] , UpperCAmelCase : Dict ):
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(UpperCAmelCase , UpperCAmelCase )
self.relabel(UpperCAmelCase )
def __A ( self : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : str ):
A_ = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def __A ( self : Optional[Any] , UpperCAmelCase : List[Any] ):
A_ = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
A_ = self.heights[to_index]
if min_height is not None:
A_ = min_height + 1
if __name__ == "__main__":
__a :Tuple = [0]
__a :Tuple = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__a :List[str] = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
__a :List[str] = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__a :List[Any] = flow_network.find_maximum_flow()
print(F"maximum flow is {maximum_flow}") | 86 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class UpperCAmelCase_ ( snake_case_):
lowerCamelCase__ = 42
class UpperCAmelCase_ ( snake_case_ , snake_case_):
@register_to_config
def __init__( self, __a = 3, __a = 3, __a = ("DownEncoderBlock2D",), __a = ("UpDecoderBlock2D",), __a = (64,), __a = 1, __a = "silu", __a = 3, __a = 32, __a = 256, __a = 32, __a = None, __a = 0.18_215, __a = "group", ):
'''simple docstring'''
super().__init__()
# pass init params to Encoder
_lowerCAmelCase : Tuple = Encoder(
in_channels=__a, out_channels=__a, down_block_types=__a, block_out_channels=__a, layers_per_block=__a, act_fn=__a, norm_num_groups=__a, double_z=__a, )
_lowerCAmelCase : Tuple = vq_embed_dim if vq_embed_dim is not None else latent_channels
_lowerCAmelCase : Dict = nn.Conv2d(__a, __a, 1)
_lowerCAmelCase : List[Any] = VectorQuantizer(__a, __a, beta=0.25, remap=__a, sane_index_shape=__a)
_lowerCAmelCase : int = nn.Conv2d(__a, __a, 1)
# pass init params to Decoder
_lowerCAmelCase : Dict = Decoder(
in_channels=__a, out_channels=__a, up_block_types=__a, block_out_channels=__a, layers_per_block=__a, act_fn=__a, norm_num_groups=__a, norm_type=__a, )
@apply_forward_hook
def snake_case__ ( self, __a, __a = True):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.encoder(__a)
_lowerCAmelCase : Tuple = self.quant_conv(__a)
if not return_dict:
return (h,)
return VQEncoderOutput(latents=__a)
@apply_forward_hook
def snake_case__ ( self, __a, __a = False, __a = True):
'''simple docstring'''
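# map the latents to their nearest codebook entries unless quantization is explicitly skipped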
if not force_not_quantize:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.quantize(__a)
else:
_lowerCAmelCase : Union[str, Any] = h
_lowerCAmelCase : Any = self.post_quant_conv(__a)
_lowerCAmelCase : Dict = self.decoder(__a, quant if self.config.norm_type == "spatial" else None)
if not return_dict:
return (dec,)
return DecoderOutput(sample=__a)
def snake_case__ ( self, __a, __a = True):
'''simple docstring'''
_lowerCAmelCase : Dict = sample
_lowerCAmelCase : List[Any] = self.encode(__a).latents
_lowerCAmelCase : Union[str, Any] = self.decode(__a).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=__a)
| 500 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__a :Dict = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Dict = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :str = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Tuple = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :List[Any] = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Any = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
__a :List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure) | 86 | 0 |
"""simple docstring"""
from math import ceil
def UpperCAmelCase_ ( __a : int = 10_01 ):
'''simple docstring'''
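# ring i of the spiral has side length 2*i + 1; its four corners sum to 4 * (2*i + 1)**2 - 6 * (2*i), plus 1 for the centre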
_lowerCamelCase : str = 1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
_lowerCamelCase : int = 2 * i + 1
_lowerCamelCase : List[Any] = 2 * i
_lowerCamelCase : Dict = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
a_ = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
| 437 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ,__UpperCamelCase : List[str] ):
"""simple docstring"""
A_ = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
A_ = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
A_ = f'''{src_lang}-{tgt_lang}'''
A_ = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
os.makedirs(__UpperCamelCase ,exist_ok=__UpperCamelCase )
A_ = os.path.join(__UpperCamelCase ,"README.md" )
print(f'''Generating {path}''' )
with open(__UpperCamelCase ,"w" ,encoding="utf-8" ) as f:
f.write(__UpperCamelCase )
# make sure we are under the root of the project
__a :Optional[Any] = Path(__file__).resolve().parent.parent.parent
__a :Optional[Any] = repo_dir / 'model_cards'
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__a , __a , __a :int = model_name.split('-')
__a :str = model_cards_dir / 'facebook' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang) | 86 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a_ : List[Any] = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : str = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[str] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
a_ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 194 |
from ..utils import DummyObject, requires_backends
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = ['torch', 'transformers', 'onnx']
def __init__( self : str , *UpperCAmelCase : int , **UpperCAmelCase : List[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Dict , *UpperCAmelCase : Dict , **UpperCAmelCase : Union[str, Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Union[str, Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : str = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[int] , *UpperCAmelCase : List[str] , **UpperCAmelCase : int ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : List[str] , *UpperCAmelCase : List[str] , **UpperCAmelCase : List[Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[Any] , *UpperCAmelCase : Dict , **UpperCAmelCase : Optional[Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = ['torch', 'transformers', 'onnx']
def __init__( self : Union[str, Any] , *UpperCAmelCase : Any , **UpperCAmelCase : Dict ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Dict , **UpperCAmelCase : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Dict , *UpperCAmelCase : List[str] , **UpperCAmelCase : List[str] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : int = ['torch', 'transformers', 'onnx']
def __init__( self : List[str] , *UpperCAmelCase : Dict , **UpperCAmelCase : Optional[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Any , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Dict ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[Any] , *UpperCAmelCase : int , **UpperCAmelCase : Optional[int] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Dict = ['torch', 'transformers', 'onnx']
def __init__( self : List[str] , *UpperCAmelCase : str , **UpperCAmelCase : int ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : List[str] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : int , **UpperCAmelCase : List[Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : int = ['torch', 'transformers', 'onnx']
def __init__( self : Tuple , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Optional[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : Dict , **UpperCAmelCase : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] ) | 86 | 0 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
lowerCAmelCase__ = logging.getLogger(__name__)
class snake_case ( snake_case_ ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None ):
super().__init__(
lowerCAmelCase_ , question_encoder_tokenizer=lowerCAmelCase_ , generator_tokenizer=lowerCAmelCase_ , index=lowerCAmelCase_ , init_retrieval=lowerCAmelCase_ , )
__lowercase = None
def snake_case__ ( self , lowerCAmelCase_ ):
logger.info("initializing retrieval" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("dist initialized" )
# needs to be set manually
__lowercase = self._infer_socket_ifname()
# avoid clash with the NCCL port
__lowercase = str(distributed_port + 1 )
__lowercase = dist.new_group(ranks=lowerCAmelCase_ , backend="gloo" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("dist not initialized / main" )
self.index.init_index()
# all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def snake_case__ ( self ):
return dist.get_rank(group=self.process_group ) == 0
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=torch.float32 ):
__lowercase = torch.empty(lowerCAmelCase_ , dtype=lowerCAmelCase_ )
dist.scatter(lowerCAmelCase_ , src=0 , scatter_list=lowerCAmelCase_ , group=self.process_group )
return target_tensor
def snake_case__ ( self ):
__lowercase = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
__lowercase = next((addr for addr in addrs if addr.startswith("e" )) , lowerCAmelCase_ )
return ifname
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ ):
# single GPU training
if not dist.is_initialized():
__lowercase , __lowercase = self._main_retrieve(lowerCAmelCase_ , lowerCAmelCase_ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(lowerCAmelCase_ )
# distributed training
__lowercase = dist.get_world_size(group=self.process_group )
# gather logic
__lowercase = None
if self._is_main():
__lowercase = [torch.empty(question_hidden_states.shape , dtype=torch.float32 ) for _ in range(lowerCAmelCase_ )]
dist.gather(torch.tensor(lowerCAmelCase_ ) , dst=0 , gather_list=lowerCAmelCase_ , group=self.process_group )
# scatter logic
__lowercase = question_hidden_states.shape[0]
__lowercase = []
__lowercase = []
if self._is_main():
assert len(lowerCAmelCase_ ) == world_size
__lowercase , __lowercase = self._main_retrieve(torch.cat(lowerCAmelCase_ ).numpy() , lowerCAmelCase_ )
__lowercase , __lowercase = torch.tensor(lowerCAmelCase_ ), torch.tensor(lowerCAmelCase_ )
__lowercase = self._chunk_tensor(lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase = self._chunk_tensor(lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase = self._scattered(lowerCAmelCase_ , [n_queries, n_docs] , target_type=torch.int64 )
__lowercase = self._scattered(lowerCAmelCase_ , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(lowerCAmelCase_ )
| 321 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = (DDPMParallelScheduler,)
def __A ( self : List[Any] , **UpperCAmelCase : Optional[int] ):
A_ = {
"num_train_timesteps": 1000,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**UpperCAmelCase )
return config
def __A ( self : Optional[Any] ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def __A ( self : Dict ):
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCAmelCase , beta_end=UpperCAmelCase )
def __A ( self : int ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase )
def __A ( self : Tuple ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCAmelCase )
def __A ( self : int ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase )
def __A ( self : Union[str, Any] ):
self.check_over_configs(thresholding=UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , )
def __A ( self : Optional[int] ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def __A ( self : Tuple ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=UpperCAmelCase )
def __A ( self : Tuple ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00_979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def __A ( self : List[Any] ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCAmelCase )
A_ = len(UpperCAmelCase )
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
A_ = self.dummy_sample_deter + 0.1
A_ = self.dummy_sample_deter - 0.1
A_ = samplea.shape[0]
A_ = torch.stack([samplea, samplea, samplea] , dim=0 )
A_ = torch.arange(UpperCAmelCase )[0:3, None].repeat(1 , UpperCAmelCase )
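# flattening the (variant, batch) dims lets the model and batch_step_no_noise process every (sample, timestep) pair in one call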
A_ = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
A_ = scheduler.batch_step_no_noise(UpperCAmelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
A_ = torch.sum(torch.abs(UpperCAmelCase ) )
A_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 1_153.1_833 ) < 1E-2
assert abs(result_mean.item() - 0.5_005 ) < 1E-3
def __A ( self : Tuple ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCAmelCase )
A_ = len(UpperCAmelCase )
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
A_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
A_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
A_ = pred_prev_sample
A_ = torch.sum(torch.abs(UpperCAmelCase ) )
A_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 258.9_606 ) < 1E-2
assert abs(result_mean.item() - 0.3_372 ) < 1E-3
def __A ( self : Tuple ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config(prediction_type="v_prediction" )
A_ = scheduler_class(**UpperCAmelCase )
A_ = len(UpperCAmelCase )
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
A_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
A_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
A_ = pred_prev_sample
A_ = torch.sum(torch.abs(UpperCAmelCase ) )
A_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 202.0_296 ) < 1E-2
assert abs(result_mean.item() - 0.2_631 ) < 1E-3
def __A ( self : Union[str, Any] ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCAmelCase )
A_ = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase )
A_ = scheduler.timesteps
for i, timestep in enumerate(UpperCAmelCase ):
if i == len(UpperCAmelCase ) - 1:
A_ = -1
else:
A_ = timesteps[i + 1]
A_ = scheduler.previous_timestep(UpperCAmelCase )
A_ = prev_t.item()
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCAmelCase )
A_ = [100, 87, 50, 51, 0]
with self.assertRaises(UpperCAmelCase , msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCAmelCase )
A_ = [100, 87, 50, 1, 0]
A_ = len(UpperCAmelCase )
with self.assertRaises(UpperCAmelCase , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase , timesteps=UpperCAmelCase )
def __A ( self : Optional[Any] ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCAmelCase )
A_ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
UpperCAmelCase , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=UpperCAmelCase ) | 86 | 0 |
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
A_ = get_logger()
A_ = None
class UpperCAmelCase ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
super().__init__(features=SCREAMING_SNAKE_CASE_ )
import jax
from jaxlib.xla_client import Device
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError(
f'''Expected {device} to be a `str` not {type(SCREAMING_SNAKE_CASE_ )}, as `jaxlib.xla_extension.Device` '''
'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
'the device with `str()` to get its string identifier that will be internally mapped '
'to the actual `jaxlib.xla_extension.Device`.' )
lowerCamelCase_ = device if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
lowerCamelCase_ = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
f'''device: {str(jax.devices()[0] )}.''' )
lowerCamelCase_ = str(jax.devices()[0] )
lowerCamelCase_ = jnp_array_kwargs
@staticmethod
def UpperCamelCase( ) -> str:
'''simple docstring'''
import jax
return {str(SCREAMING_SNAKE_CASE_ ): device for device in jax.devices()}
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
import jax
import jax.numpy as jnp
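# stack a column into a single jax array only when every element shares the same shape and dtype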
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and column:
if all(
isinstance(SCREAMING_SNAKE_CASE_ , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(SCREAMING_SNAKE_CASE_ , axis=0 )
return column
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(SCREAMING_SNAKE_CASE_ , (str, bytes, type(None )) ):
return value
elif isinstance(SCREAMING_SNAKE_CASE_ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
lowerCamelCase_ = {}
if isinstance(SCREAMING_SNAKE_CASE_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_x64:
lowerCamelCase_ = {'dtype': jnp.int64}
else:
lowerCamelCase_ = {'dtype': jnp.int32}
elif isinstance(SCREAMING_SNAKE_CASE_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
lowerCamelCase_ = {'dtype': jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(SCREAMING_SNAKE_CASE_ , PIL.Image.Image ):
lowerCamelCase_ = np.asarray(SCREAMING_SNAKE_CASE_ )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
lowerCamelCase_ = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(SCREAMING_SNAKE_CASE_ , **{**default_dtype, **self.jnp_array_kwargs} )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(SCREAMING_SNAKE_CASE_ , '__array__' ) and not isinstance(SCREAMING_SNAKE_CASE_ , jax.Array ):
lowerCamelCase_ = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) for substruct in data_struct] )
elif isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) for substruct in data_struct] )
return self._tensorize(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
return map_nested(self._recursive_tensorize , SCREAMING_SNAKE_CASE_ , map_list=SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = self.numpy_arrow_extractor().extract_row(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.python_features_decoder.decode_row(SCREAMING_SNAKE_CASE_ )
return self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = self.numpy_arrow_extractor().extract_column(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.python_features_decoder.decode_column(SCREAMING_SNAKE_CASE_ , pa_table.column_names[0] )
lowerCamelCase_ = self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self._consolidate(SCREAMING_SNAKE_CASE_ )
return column
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = self.numpy_arrow_extractor().extract_batch(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.python_features_decoder.decode_batch(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
for column_name in batch:
lowerCamelCase_ = self._consolidate(batch[column_name] )
return batch
| 42 |
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Dict ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[Any] ,__UpperCamelCase : List[Any] ):
"""simple docstring"""
with open(__UpperCamelCase ) as metadata_file:
A_ = json.load(__UpperCamelCase )
A_ = LukeConfig(use_entity_aware_attention=__UpperCamelCase ,**metadata["model_config"] )
# Load in the weights from the checkpoint_path
A_ = torch.load(__UpperCamelCase ,map_location="cpu" )
# Load the entity vocab file
A_ = load_entity_vocab(__UpperCamelCase )
A_ = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
A_ = AddedToken("<ent>" ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase )
A_ = AddedToken("<ent2>" ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(__UpperCamelCase )
with open(os.path.join(__UpperCamelCase ,LukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f:
json.dump(__UpperCamelCase ,__UpperCamelCase )
A_ = LukeTokenizer.from_pretrained(__UpperCamelCase )
# Initialize the embeddings of the special tokens
A_ = state_dict["embeddings.word_embeddings.weight"]
A_ = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 )
A_ = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 )
A_ = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
A_ = f'''encoder.layer.{layer_index}.attention.self.'''
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
A_ = state_dict["entity_embeddings.entity_embeddings.weight"]
A_ = entity_emb[entity_vocab["[MASK]"]]
A_ = LukeModel(config=__UpperCamelCase ).eval()
A_ , A_ = model.load_state_dict(__UpperCamelCase ,strict=__UpperCamelCase )
if not (len(__UpperCamelCase ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f'''Missing keys {", ".join(__UpperCamelCase )}. Expected only missing embeddings.position_ids''' )
if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )):
raise ValueError(
"Unexpected keys"
f''' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}''' )
# Check outputs
A_ = LukeTokenizer.from_pretrained(__UpperCamelCase ,task="entity_classification" )
A_ = (
"Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
" new world number one avoid a humiliating second- round exit at Wimbledon ."
)
A_ = (39, 42)
A_ = tokenizer(__UpperCamelCase ,entity_spans=[span] ,add_prefix_space=__UpperCamelCase ,return_tensors="pt" )
A_ = model(**__UpperCamelCase )
# Verify word hidden states
if model_size == "large":
A_ = torch.Size((1, 42, 1024) )
A_ = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
A_ = torch.Size((1, 42, 768) )
A_ = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,__UpperCamelCase ,atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
A_ = torch.Size((1, 1, 1024) )
A_ = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
A_ = torch.Size((1, 1, 768) )
A_ = torch.tensor([[0.1457, 0.1044, 0.0174]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
f''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,__UpperCamelCase ,atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(__UpperCamelCase ) )
model.save_pretrained(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
A_ = {}
with open(__UpperCamelCase ,"r" ,encoding="utf-8" ) as f:
for index, line in enumerate(__UpperCamelCase ):
A_ , A_ = line.rstrip().split("\t" )
A_ = index
return entity_vocab
if __name__ == "__main__":
__a :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
__a :Tuple = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
) | 86 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 342 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__a :Optional[Any] = 'true'
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : List[Any]=82 ,__UpperCamelCase : Dict=16 ):
"""simple docstring"""
set_seed(42 )
A_ = RegressionModel()
A_ = deepcopy(__UpperCamelCase )
A_ = RegressionDataset(length=__UpperCamelCase )
A_ = DataLoader(__UpperCamelCase ,batch_size=__UpperCamelCase )
model.to(accelerator.device )
A_ , A_ = accelerator.prepare(__UpperCamelCase ,__UpperCamelCase )
return model, ddp_model, dataloader
def __snake_case ( __UpperCamelCase : Accelerator ,__UpperCamelCase : Dict=False ):
"""simple docstring"""
A_ = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" )
A_ = load_dataset("glue" ,"mrpc" ,split="validation" )
def tokenize_function(__UpperCamelCase : Optional[Any] ):
A_ = tokenizer(examples["sentence1"] ,examples["sentence2"] ,truncation=__UpperCamelCase ,max_length=__UpperCamelCase )
return outputs
with accelerator.main_process_first():
A_ = dataset.map(
__UpperCamelCase ,batched=__UpperCamelCase ,remove_columns=["idx", "sentence1", "sentence2"] ,)
A_ = tokenized_datasets.rename_column("label" ,"labels" )
def collate_fn(__UpperCamelCase : Union[str, Any] ):
if use_longest:
return tokenizer.pad(__UpperCamelCase ,padding="longest" ,return_tensors="pt" )
return tokenizer.pad(__UpperCamelCase ,padding="max_length" ,max_length=128 ,return_tensors="pt" )
return DataLoader(__UpperCamelCase ,shuffle=__UpperCamelCase ,collate_fn=__UpperCamelCase ,batch_size=16 )
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : str ):
"""simple docstring"""
A_ = Accelerator(dispatch_batches=__UpperCamelCase ,split_batches=__UpperCamelCase )
A_ = get_dataloader(__UpperCamelCase ,not dispatch_batches )
A_ = AutoModelForSequenceClassification.from_pretrained(
"hf-internal-testing/mrpc-bert-base-cased" ,return_dict=__UpperCamelCase )
A_ , A_ = accelerator.prepare(__UpperCamelCase ,__UpperCamelCase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : int ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ = []
for batch in dataloader:
A_ , A_ = batch.values()
with torch.no_grad():
A_ = model(__UpperCamelCase )
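# gather_for_metrics collects predictions from all processes and drops samples duplicated to pad the last batch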
A_ , A_ = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
A_ , A_ = [], []
for logit, targ in logits_and_targets:
logits.append(__UpperCamelCase )
targs.append(__UpperCamelCase )
A_ , A_ = torch.cat(__UpperCamelCase ), torch.cat(__UpperCamelCase )
return logits, targs
def __snake_case ( __UpperCamelCase : Accelerator ,__UpperCamelCase : Dict=82 ,__UpperCamelCase : List[Any]=False ,__UpperCamelCase : Dict=False ,__UpperCamelCase : Optional[int]=16 ):
"""simple docstring"""
A_ , A_ , A_ = get_basic_setup(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
A_ , A_ = generate_predictions(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
assert (
len(__UpperCamelCase ) == num_samples
), f'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__UpperCamelCase )}'''
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main() | 86 | 0 |
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
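
# Illustrative usage (added for clarity; not part of the original module).
# Builds a small directed and an undirected graph and prints their adjacency lists.
if __name__ == "__main__":
    directed_graph = GraphAdjacencyList()
    directed_graph.add_edge(1, 2).add_edge(2, 3)
    print(directed_graph)  # {1: [2], 2: [3], 3: []}

    undirected_graph = GraphAdjacencyList(directed=False)
    undirected_graph.add_edge("a", "b")
    print(undirected_graph)  # {'a': ['b'], 'b': ['a']}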
| 79 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text between `start_prompt` and `end_prompt`, keeping track of its location."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task(task_guide):
    """Return the list of models supporting `task_guide`, formatted as doc links."""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """Check (and optionally fix) the generated model list in a task guide."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite) | 86 | 0 |
def bubble_sort(list_data: list, length: int = 0) -> list:
    """
    >>> bubble_sort([0, 5, 2, 3, 2])
    [0, 2, 2, 3, 5]
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 140 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__a :Dict = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )
    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
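
# Toy illustration of the qkv split above (added comment, not original code):
# for hidden_size = 2, a fused (3 * hidden_size, hidden_size) projection
#   fused = torch.arange(12.0).reshape(6, 2)
# is cut row-wise into query = fused[:2], key = fused[2:4], value = fused[-2:],
# matching the slices written into the state dict.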
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak a timm ViT checkpoint into the HuggingFace ViT design.
    """
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__a :str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__a :Optional[int] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path) | 86 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element
def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))
    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)
    return dl
def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)
    batch_sizes = [len(batch[0]) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()
    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1, 1]
    )
    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 2]
    )
def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1]
    )
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 1]
    )
def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass
        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)
def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches
    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError
    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass
        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)
def main():
    accelerator = create_accelerator()
    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()
    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()
    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()
    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()
    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()
    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()
    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state


if __name__ == "__main__":
    main()
| 99 |
def solution(length: int = 50) -> int:
    """Count the ways to tile a row of `length` units with grey squares and tiles of length 2, 3 and 4."""
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]
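
# Sanity check (added): this matches the worked example from Project Euler
# problem 117, which this function appears to implement: a row of five units
# can be tiled in exactly fifteen different ways (1 all-grey tiling, plus 8, 4
# and 2 tilings whose first oblong tile has length 2, 3 and 4 respectively).
assert solution(5) == 15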
if __name__ == "__main__":
print(F"{solution() = }") | 86 | 0 |
'''simple docstring'''
def nand_gate(input_1: int, input_2: int) -> int:
    """A NAND gate returns 0 only when both inputs are 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
| 69 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")
        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results
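
    # Example call (added comment, mirroring the documented pipeline usage):
    #   detector = pipeline(task="zero-shot-object-detection")
    #   detector(
    #       "http://images.cocodataset.org/val2017/000000039769.jpg",
    #       candidate_labels=["cat", "remote"],
    #   )
    #   -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]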
    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params
    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)
        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]
            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])
                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results
def __A ( self : List[str] , UpperCAmelCase : "torch.Tensor" ):
if self.framework != "pt":
raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch." )
A_ , A_ , A_ , A_ = box.int().tolist()
A_ = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox | 86 | 0 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
| 118 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./  # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoint
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__a :List[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
__a :Optional[int] = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 86 | 0 |
MOD_ADLER = 65_521


def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum of `plain_text`, character by character."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
| 500 |
def bubble_sort(list_data: list, length: int = 0) -> list:
    """
    >>> bubble_sort([0, 5, 2, 3, 2])
    [0, 2, 2, 3, 5]
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 86 | 0 |
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """Release memory by setting the passed objects to `None` and emptying the device cache."""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def should_reduce_batch_size(exception: Exception) -> bool:
    """Check whether `exception` is one of the known out-of-memory errors."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False
def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    """
    Decorator that retries `function` with a halved batch size every time an
    out-of-memory error is raised, until it succeeds or the batch size reaches zero.
    """
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
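
# Usage sketch (added for illustration): the decorated function must take the
# batch size as its first argument; `train_one_epoch` below is a hypothetical
# helper, not part of this module.
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       train_one_epoch(batch_size)
#
# Each OOM failure halves `batch_size` and retries until training fits in memory.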
| 437 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state() | 86 | 0 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()
@add_start_docstrings(
    "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top, also takes care of multi-layer training. ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=-1 , SCREAMING_SNAKE_CASE=False , ) -> int:
a__ = self.num_layers
try:
a__ = self.roberta(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , position_ids=SCREAMING_SNAKE_CASE , head_mask=SCREAMING_SNAKE_CASE , inputs_embeds=SCREAMING_SNAKE_CASE , )
a__ = outputs[1]
a__ = self.dropout(SCREAMING_SNAKE_CASE )
a__ = self.classifier(SCREAMING_SNAKE_CASE )
a__ = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
a__ = e.message
a__ = e.exit_layer
a__ = outputs[0]
if not self.training:
a__ = entropy(SCREAMING_SNAKE_CASE )
a__ = []
a__ = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
a__ = MSELoss()
a__ = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
a__ = CrossEntropyLoss()
a__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
a__ = []
for highway_exit in outputs[-1]:
a__ = highway_exit[0]
if not self.training:
highway_logits_all.append(SCREAMING_SNAKE_CASE )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
a__ = MSELoss()
a__ = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
a__ = CrossEntropyLoss()
a__ = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(SCREAMING_SNAKE_CASE )
if train_highway:
a__ = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
a__ = (loss,) + outputs
if not self.training:
a__ = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
a__ = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 194 |
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__a :int = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_0224, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
__a :Tuple = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
) | 86 | 0 |
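The conversion above ties the decoder's output projection to its embedding matrix. A minimal, self-contained sketch of that one move, using plain torch and illustrative sizes (not part of the script itself):

import torch
from torch import nn

emb = nn.Embedding(10, 4)
vocab_size, emb_size = emb.weight.shape
lin_layer = nn.Linear(emb_size, vocab_size, bias=False)   # weight shape (vocab_size, emb_size)
lin_layer.weight.data = emb.weight.data                   # share the embedding matrix
assert torch.equal(lin_layer.weight, emb.weight)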
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x ):
    '''simple docstring'''
    exp_x = torch.exp(x )
    A = torch.sum(exp_x , dim=1 )  # sum of exp(x_i)
    B = torch.sum(x * exp_x , dim=1 )  # sum of x_i * exp(x_i)
    return torch.log(A ) - B / A
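# The closed form above equals the Shannon entropy of softmax(x). A quick
# self-check of that identity (illustrative only, not used by DeeBERT):
def _entropy_reference_check():
    logits = torch.randn(4, 10)
    p = torch.softmax(logits, dim=1)
    reference = -(p * torch.log(p)).sum(dim=1)  # H = -sum_i p_i log p_i
    assert torch.allclose(entropy(logits), reference, atol=1e-5)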
class DeeBertEncoder(nn.Module ):
    """simple docstring"""
    def __init__( self , config ):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config ) for _ in range(config.num_hidden_layers )] )
        self.highway = nn.ModuleList([BertHighway(config ) for _ in range(config.num_hidden_layers )] )
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers )]
    def set_early_exit_entropy( self , x ):
        if (type(x ) is float) or (type(x ) is int):
            for i in range(len(self.early_exit_entropy ) ):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x
    def init_highway_pooler( self , pooler ):
        loaded_model = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
    def forward( self , hidden_states , attention_mask=None , head_mask=None , encoder_hidden_states=None , encoder_attention_mask=None , ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer ):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(
                hidden_states , attention_mask , head_mask[i] , encoder_hidden_states , encoder_attention_mask )
            hidden_states = layer_outputs[0]
            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)
            highway_exit = self.highway[i](current_outputs )
            # logits, pooled_output
            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits )
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)
                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output , i + 1 )
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)
        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
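# The forward pass above uses an exception for control flow: the first highway
# whose entropy drops below its threshold raises, and the caller catches the
# partial outputs. A stripped-down sketch of that pattern (illustrative names,
# not part of the module's API):
def _early_exit_demo():
    thresholds = [0.5, 0.5, 0.5]
    stages = [lambda v: (v + 1, 0.9), lambda v: (v + 1, 0.1), lambda v: (v + 1, 0.9)]
    x = 0
    try:
        for i, stage in enumerate(stages):
            x, score = stage(x)
            if score < thresholds[i]:
                raise HighwayException((x,), i + 1)
    except HighwayException as e:
        return e.message, e.exit_layer  # exits at layer 2 with x == 2
    return (x,), len(stages)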
@add_start_docstrings(
    """The Bert Model transformer with early exiting (DeeBERT). """ , BERT_START_DOCSTRING , )
class DeeBertModel(BertPreTrainedModel ):
    """simple docstring"""
    def __init__( self , config ):
        super().__init__(config )
        self.config = config
        self.embeddings = BertEmbeddings(config )
        self.encoder = DeeBertEncoder(config )
        self.pooler = BertPooler(config )
        self.init_weights()
    def init_highway_pooler( self ):
        self.encoder.init_highway_pooler(self.pooler )
    def get_input_embeddings( self ):
        return self.embeddings.word_embeddings
    def set_input_embeddings( self , value ):
        self.embeddings.word_embeddings = value
    def _prune_heads( self , heads_to_prune ):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads )
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
    def forward( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , encoder_hidden_states=None , encoder_attention_mask=None , ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds" )
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            attention_mask = torch.ones(input_shape , device=device )
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape , device=device )
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape , dtype=torch.long , device=device )
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask , input_shape , device )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters() ).dtype )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1_0000.0
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask , self.config.num_hidden_layers )
        embedding_output = self.embeddings(
            input_ids=input_ids , position_ids=position_ids , token_type_ids=token_type_ids , inputs_embeds=inputs_embeds )
        encoder_outputs = self.encoder(
            embedding_output , attention_mask=extended_attention_mask , head_mask=head_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_extended_attention_mask , )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output )
        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception ):
    """simple docstring"""
    def __init__( self , message , exit_layer ):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module ):
    """simple docstring"""
    def __init__( self , config ):
        super().__init__()
        self.pooler = BertPooler(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        self.classifier = nn.Linear(config.hidden_size , config.num_labels )
    def forward( self , encoder_outputs ):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input )
        # "return" pooler_output
        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output
        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output )
        logits = self.classifier(pooled_output )
return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,\n    also takes care of multi-layer training. """ , BERT_START_DOCSTRING , )
class DeeBertForSequenceClassification(BertPreTrainedModel ):
    """simple docstring"""
    def __init__( self , config ):
        super().__init__(config )
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.bert = DeeBertModel(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        self.classifier = nn.Linear(config.hidden_size , self.config.num_labels )
        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
    def forward( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , labels=None , output_layer=-1 , train_highway=False , ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output )
            logits = self.classifier(pooled_output )
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]
        if not self.training:
            original_entropy = entropy(logits )
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
                highway_losses.append(highway_loss )
            if train_highway:
                outputs = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 321 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__a :str = logging.get_logger(__name__)
__a :Any = Dict[str, Any]
__a :int = List[Prediction]
@add_end_docstrings(snake_case_ )
class _a ( snake_case_ ):
"""simple docstring"""
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
if self.framework == "tf":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , "vision" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
    def _sanitize_parameters( self , **kwargs ):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs
    def __call__( self , *args , **kwargs ):
        return super().__call__(*args , **kwargs )
    def preprocess( self , image ):
        image = load_image(image )
        target_size = torch.IntTensor([[image.height, image.width]] )
        inputs = self.image_processor(images=[image] , return_tensors="pt" )
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" )
        inputs["target_size"] = target_size
        return inputs
    def _forward( self , model_inputs ):
        target_size = model_inputs.pop("target_size" )
        outputs = self.model(**model_inputs )
        model_outputs = outputs.__class__({"target_size": target_size, **outputs} )
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs
    def postprocess( self , model_outputs , threshold=0.9 ):
        target_size = model_outputs["target_size"]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
            height , width = target_size[0].tolist()
            def unnormalize(bbox : Any ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
] ) )
            scores , classes = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
            labels = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox ) for bbox in model_outputs["bbox"].squeeze(0 )]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys , vals ) ) for vals in zip(scores.tolist() , labels , boxes ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs , threshold , target_size )
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]
            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.idalabel[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box ) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys , vals ) )
                for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] )
            ]
return annotation
def __A ( self : Tuple , UpperCAmelCase : "torch.Tensor" ):
if self.framework != "pt":
raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." )
        xmin , ymin , xmax , ymax = box.int().tolist()
        bbox = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox | 86 | 0 |
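LayoutLM-style models keep word boxes on a 0-1000 grid, so the `unnormalize` closure above is a plain rescale back to pixel space. A worked example of that arithmetic with a hypothetical helper name and hand-picked numbers:

def unnormalize_box(bbox, width, height):
    x0, y0, x1, y1 = bbox
    return (width * x0 / 1000, height * y0 / 1000,
            width * x1 / 1000, height * y1 / 1000)

assert unnormalize_box((100, 200, 500, 800), width=640, height=480) == (64.0, 96.0, 320.0, 384.0)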
'''simple docstring'''
from PIL import Image
def change_contrast( img: Image ,level: int ) -> Image:
    factor = (2_59 * (level + 2_55)) / (2_55 * (2_59 - level))
    def contrast( c: int ) -> int:
        return int(1_28 + factor * (c - 1_28) )
    return img.point(contrast )
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change contrast to 170
        cont_img = change_contrast(img, 170)
cont_img.save("image_data/lena_high_contrast.png", format="png")
| 42 |
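For intuition on the factor formula above (assuming 8-bit channels): level 0 gives factor 1.0, the identity mapping, while larger levels steepen the curve around the midpoint 128; the raw formula can leave the 0-255 range at extreme levels. A quick check:

for level in (0, 64, 170):
    factor = (259 * (level + 255)) / (255 * (259 - level))
    lo, hi = int(128 + factor * (0 - 128)), int(128 + factor * (255 - 128))
    print(level, round(factor, 3), lo, hi)
# level=0 prints factor 1.0 with endpoints 0 and 255 (identity mapping)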
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess( image ):
    """simple docstring"""
    w , h = image.size
    w , h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h) ,resample=PIL_INTERPOLATION["lanczos"] )
    image = np.array(image ).astype(np.floataa ) / 255.0
    image = image[None].transpose(0 ,3 ,1 ,2 )
    image = torch.from_numpy(image )
return 2.0 * image - 1.0
class _a ( snake_case_ ):
"""simple docstring"""
    def __init__( self , vqvae : VQModel , unet : UNetaDModel , scheduler : Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ):
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , image : Union[torch.Tensor, PIL.Image.Image] = None , batch_size : Optional[int] = 1 , num_inference_steps : Optional[int] = 100 , eta : Optional[float] = 0.0 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ):
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        else:
            raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image )}''' )
        if isinstance(image , PIL.Image.Image ):
            image = preprocess(image )
        height , width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters() ).dtype
        latents = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        image = image.to(device=self.device , dtype=latents_dtype )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor ):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image] , dim=1 )
            latents_input = self.scheduler.scale_model_input(latents_input , t )
            # predict the noise residual
            noise_pred = self.unet(latents_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_kwargs ).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents ).sample
        image = torch.clamp(image , -1.0 , 1.0 )
        image = image / 2 + 0.5
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image ) | 86 | 0 |
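The `preprocess` helper above snaps width and height down to multiples of 32 so the UNet's downsampling stages divide evenly. The rounding rule in isolation, with hypothetical dimensions:

w, h = 517, 389
w, h = (x - x % 32 for x in (w, h))
assert (w, h) == (512, 384)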
'''simple docstring'''
def search( list_data: list , key: int , left: int = 0 , right: int = 0 ) -> int:
    right = right or len(list_data ) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data , key , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 342 |
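The recursive `search` above probes both ends of the list and narrows inward, so it is a linear-time scan (not a binary search) and needs no sorted input. A short usage sketch, assuming the function defined above is in scope:

data = [4, 8, 15, 16, 23, 42]
assert search(data, 23) == 4
assert search(data, 7) == -1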
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_0000)]
def next_number( number: int ) -> int:
    """simple docstring"""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
        number //= 10_0000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 1000_0000
CHAINS[0] = True
CHAINS[57] = False
def chain( number: int ) -> bool:
    """simple docstring"""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    while number < 1000_0000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution( number: int = 1000_0000 ) -> int:
    """simple docstring"""
    for i in range(1 ,number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(False )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{solution() = }") | 86 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
SCREAMING_SNAKE_CASE__ : List[Any] = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Dict = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[str] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
SCREAMING_SNAKE_CASE__ : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 79 |
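The `_LazyModule` indirection above defers the heavy torch/flax imports until an attribute is first touched. A stripped-down sketch of the mechanism (illustrative, not the transformers implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Import the owning submodule only when one of its names is requested.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")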
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__a :List[Any] = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Any = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Dict = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
__a :str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 86 | 0 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared( vector ):
    return np.dot(vector , vector )
class lowerCamelCase :
"""simple docstring"""
    def __init__( self , *,
        regularization : float = np.inf , kernel : str = "linear" , gamma : float = 0.0 , ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError("rbf kernel requires gamma" )
if not isinstance(self.gamma , (float, int) ):
raise ValueError("gamma must be float or int" )
if not self.gamma > 0:
raise ValueError("gamma must be > 0" )
            self.kernel = self.__rbf
# in the future, there could be a default value like in sklearn
# sklear: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
            msg = f'''Unknown kernel: {kernel}'''
            raise ValueError(msg )
    def __linear( self , vectora : ndarray , vectorb : ndarray ) -> float:
        return np.dot(vectora , vectorb )
    def __rbf( self , vectora : ndarray , vectorb : ndarray ) -> float:
        return np.exp(-(self.gamma * norm_squared(vectora - vectorb ) ) )
    def fit( self , observations : list[ndarray] , classes : ndarray ) -> None:
        self.observations = observations
        self.classes = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
        (n,) = np.shape(classes )
        def to_minimize(candidate : ndarray ) -> float:
            s = 0
            (n,) = np.shape(candidate )
            for i in range(n ):
                for j in range(n ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j] )
)
            return 1 / 2 * s - sum(candidate )
        ly_contraint = LinearConstraint(classes , 0 , 0 )
        l_bounds = Bounds(0 , self.regularization )
        l_star = minimize(
            to_minimize , np.ones(n ) , bounds=l_bounds , constraints=[ly_contraint] ).x
        self.optimum = l_star
# calculating mean offset of separation plane to points
        s = 0
for i in range(__magic_name__ ):
for j in range(__magic_name__ ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j] )
        self.offset = s / n
    def predict( self , observation : ndarray ) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n] , observation )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 140 |
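A small numeric check of the RBF kernel identity used above, k(a, b) = exp(-gamma * ||a - b||^2), with hand-picked vectors:

import numpy as np

a, b, gamma = np.array([1.0, 0.0]), np.array([0.0, 1.0]), 0.5
assert np.isclose(np.exp(-gamma * np.dot(a - b, a - b)), np.exp(-1.0))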
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
__a :List[Any] = get_logger()
__a :Optional[dict] = None
class _a ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
"""simple docstring"""
def __init__( self : str , UpperCAmelCase : int=None , UpperCAmelCase : List[str]=None , **UpperCAmelCase : List[Any] ):
super().__init__(features=UpperCAmelCase )
import jax
from jaxlib.xla_client import Device
if isinstance(UpperCAmelCase , UpperCAmelCase ):
raise ValueError(
f'''Expected {device} to be a `str` not {type(UpperCAmelCase )}, as `jaxlib.xla_extension.Device` '''
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
A_ = device if isinstance(UpperCAmelCase , UpperCAmelCase ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
A_ = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
f'''device: {str(jax.devices()[0] )}.''' )
A_ = str(jax.devices()[0] )
A_ = jnp_array_kwargs
@staticmethod
def __A ( ):
import jax
return {str(UpperCAmelCase ): device for device in jax.devices()}
def __A ( self : Optional[int] , UpperCAmelCase : int ):
import jax
import jax.numpy as jnp
if isinstance(UpperCAmelCase , UpperCAmelCase ) and column:
if all(
isinstance(UpperCAmelCase , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(UpperCAmelCase , axis=0 )
return column
def __A ( self : List[str] , UpperCAmelCase : str ):
import jax
import jax.numpy as jnp
if isinstance(UpperCAmelCase , (str, bytes, type(UpperCAmelCase )) ):
return value
elif isinstance(UpperCAmelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
A_ = {}
if isinstance(UpperCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
A_ = {"dtype": jnp.intaa}
else:
A_ = {"dtype": jnp.intaa}
elif isinstance(UpperCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
A_ = {"dtype": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(UpperCAmelCase , PIL.Image.Image ):
A_ = np.asarray(UpperCAmelCase )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
A_ = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(UpperCAmelCase , **{**default_dtype, **self.jnp_array_kwargs} )
def __A ( self : Any , UpperCAmelCase : Dict ):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(UpperCAmelCase , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(UpperCAmelCase , "__array__" ) and not isinstance(UpperCAmelCase , jax.Array ):
A_ = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(UpperCAmelCase , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(UpperCAmelCase ) for substruct in data_struct] )
elif isinstance(UpperCAmelCase , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(UpperCAmelCase ) for substruct in data_struct] )
return self._tensorize(UpperCAmelCase )
def __A ( self : Tuple , UpperCAmelCase : dict ):
return map_nested(self._recursive_tensorize , UpperCAmelCase , map_list=UpperCAmelCase )
def __A ( self : Dict , UpperCAmelCase : pa.Table ):
A_ = self.numpy_arrow_extractor().extract_row(UpperCAmelCase )
A_ = self.python_features_decoder.decode_row(UpperCAmelCase )
return self.recursive_tensorize(UpperCAmelCase )
def __A ( self : Any , UpperCAmelCase : pa.Table ):
A_ = self.numpy_arrow_extractor().extract_column(UpperCAmelCase )
A_ = self.python_features_decoder.decode_column(UpperCAmelCase , pa_table.column_names[0] )
A_ = self.recursive_tensorize(UpperCAmelCase )
A_ = self._consolidate(UpperCAmelCase )
return column
def __A ( self : Dict , UpperCAmelCase : pa.Table ):
A_ = self.numpy_arrow_extractor().extract_batch(UpperCAmelCase )
A_ = self.python_features_decoder.decode_batch(UpperCAmelCase )
A_ = self.recursive_tensorize(UpperCAmelCase )
for column_name in batch:
A_ = self._consolidate(batch[column_name] )
return batch | 86 | 0 |
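The formatter above works around `jaxlib` devices not being picklable by keeping a str -> Device map and resolving it lazily. The core move in isolation (a sketch; it requires a working jax install):

import jax
import jax.numpy as jnp

device_mapping = {str(d): d for d in jax.devices()}
device_str = str(jax.devices()[0])          # the serializable identifier
with jax.default_device(device_mapping[device_str]):
    x = jnp.array([1, 2, 3])                # allocated on the resolved device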
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 99 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
__a :Any = logging.getLogger(__name__)
class _a ( snake_case_ ):
"""simple docstring"""
    def __init__( self , config , question_encoder_tokenizer , generator_tokenizer , index=None ):
        super().__init__(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
        self.process_group = None
    def init_retrieval( self , distributed_port ):
logger.info("initializing retrieval" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("dist initialized" )
# needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1 )
            self.process_group = dist.new_group(ranks=None , backend="gloo" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("dist not initialized / main" )
self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
    def _is_main( self ):
return dist.get_rank(group=self.process_group ) == 0
    def _scattered( self , scatter_as_list , target_shape , target_type=torch.floataa ):
        target_tensor = torch.empty(target_shape , dtype=target_type )
        dist.scatter(target_tensor , src=0 , scatter_list=scatter_as_list , group=self.process_group )
return target_tensor
    def _infer_socket_ifname( self ):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e" )) , None )
return ifname
    def retrieve( self , question_hidden_states : np.ndarray , n_docs : int ):
# single GPU training
if not dist.is_initialized():
            doc_ids , retrieved_doc_embeds = self._main_retrieve(question_hidden_states , n_docs )
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids )
# distributed training
        world_size = dist.get_world_size(group=self.process_group )
        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(world_size )]
        dist.gather(torch.tensor(question_hidden_states ) , dst=0 , gather_list=gather_list , group=self.process_group )
        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list ) == world_size
            doc_ids , retrieved_doc_embeds = self._main_retrieve(torch.cat(gather_list ).numpy() , n_docs )
            doc_ids , retrieved_doc_embeds = torch.tensor(doc_ids ), torch.tensor(retrieved_doc_embeds )
            scatter_ids = self._chunk_tensor(doc_ids , n_queries )
            scatter_vectors = self._chunk_tensor(retrieved_doc_embeds , n_queries )
        doc_ids = self._scattered(scatter_ids , [n_queries, n_docs] , target_type=torch.intaa )
        retrieved_doc_embeds = self._scattered(scatter_vectors , [n_queries, n_docs, question_hidden_states.shape[1]] )
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids ) | 86 | 0 |
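The gather/scatter round-trip above is easiest to follow as shape bookkeeping: rank 0 gathers one query batch per rank, retrieves for all of them at once, then splits the results back into per-rank chunks before the scatter. The splitting step with plain torch and illustrative sizes:

import torch

world_size, n_queries_per_rank = 2, 4
gathered = torch.arange(world_size * n_queries_per_rank * 3).reshape(8, 3)
chunks = torch.chunk(gathered, world_size, dim=0)   # one slice per rank
assert all(c.shape == (n_queries_per_rank, 3) for c in chunks)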
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
a : Optional[Any] = logging.get_logger(__name__)
a : Dict = {
'openai/whisper-base': 'https://huggingface.co/openai/whisper-base/resolve/main/config.json',
}
# fmt: off
a : Dict = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
a : Optional[int] = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class SCREAMING_SNAKE_CASE__ ( snake_case_ ):
__SCREAMING_SNAKE_CASE = 'whisper'
__SCREAMING_SNAKE_CASE = ['past_key_values']
__SCREAMING_SNAKE_CASE = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self , vocab_size=51_865 , num_mel_bins=80 , encoder_layers=6 , encoder_attention_heads=4 , decoder_layers=6 , decoder_attention_heads=4 , decoder_ffn_dim=1_536 , encoder_ffn_dim=1_536 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , decoder_start_token_id=50_257 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=256 , dropout=0.0 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , scale_embedding=False , max_source_positions=1_500 , max_target_positions=448 , pad_token_id=50_256 , bos_token_id=50_256 , eos_token_id=50_256 , suppress_tokens=None , begin_suppress_tokens=[220, 50_256] , use_weighted_layer_sum=False , classifier_proj_size=256 , apply_spec_augment=False , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , median_filter_width=7 , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , suppress_tokens=suppress_tokens , begin_suppress_tokens=begin_suppress_tokens , **kwargs , )
class SCREAMING_SNAKE_CASE__ ( snake_case_ ):
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ] )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="inputs" )
return common_inputs
    def generate_dummy_inputs( self , preprocessor : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional["TensorType"] = None , sampling_rate : int = 22_050 , time_duration : float = 5.0 , frequency : int = 220 , ):
        """simple docstring"""
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self , preprocessor=preprocessor.feature_extractor , batch_size=batch_size , framework=framework , sampling_rate=sampling_rate , time_duration=time_duration , frequency=frequency , )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer , batch_size , seq_length , is_pair , framework )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features" )
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids" )
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values" )
return dummy_inputs
@property
    def atol_for_validation( self ) -> float:
"""simple docstring"""
return 1e-3
| 69 |
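The `attribute_map` at the top of the config lets generic code read `config.num_attention_heads` while the stored field is `encoder_attention_heads`. A stripped-down version of that aliasing (illustrative, not the transformers base class):

class AliasedConfig:
    attribute_map = {"num_attention_heads": "encoder_attention_heads"}

    def __init__(self, encoder_attention_heads=6):
        self.encoder_attention_heads = encoder_attention_heads

    def __getattr__(self, name):
        # Only consulted when normal attribute lookup fails.
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)

assert AliasedConfig().num_attention_heads == 6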
from jiwer import compute_measures
import datasets
__a :List[Any] = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
__a :Union[str, Any] = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
__a :str = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] , )
    def _compute( self , predictions=None , references=None , concatenate_texts=False ):
        if concatenate_texts:
            return compute_measures(references , predictions )["wer"]
else:
A_ = 0
A_ = 0
for prediction, reference in zip(UpperCAmelCase , UpperCAmelCase ):
                measures = compute_measures(reference , prediction )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total | 86 | 0 |
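Worked example of the formula from the description above, on the docstring's own pair of lists: the first pair has 1 substitution against a 4-word reference, and the second pair has 2 substitutions plus 1 insertion against a 4-word reference, so WER = (1 + 3) / (4 + 4) = 0.5 (the arithmetic only; jiwer does the alignment):

# (S, D, I) per pair, N = reference word counts
pair_errors = [(1, 0, 0), (2, 0, 1)]
pair_refs = [4, 4]
wer = sum(sum(e) for e in pair_errors) / sum(pair_refs)
assert wer == 0.5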
"""simple docstring"""
import math
def decimal_to_octal( num: int ) -> str:
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(1_0 , counter ) ))
        counter += 1
        num = math.floor(num / 8 )  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f'''0o{int(octal )}'''
def main() -> None:
print("\n2 in octal is:" )
print(decimal_to_octal(2 ) ) # = 2
print("\n8 in octal is:" )
print(decimal_to_octal(8 ) ) # = 10
print("\n65 in octal is:" )
print(decimal_to_octal(6_5 ) ) # = 101
print("\n216 in octal is:" )
print(decimal_to_octal(2_1_6 ) ) # = 330
print("\n512 in octal is:" )
print(decimal_to_octal(5_1_2 ) ) # = 1000
print("\n" )
if __name__ == "__main__":
main()
| 118 |
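The same conversion written with divmod, accumulating digits as a string instead of the base-10 place-value trick above (an alternative sketch):

def to_octal(num: int) -> str:
    digits = ""
    while num > 0:
        num, rem = divmod(num, 8)
        digits = str(rem) + digits
    return "0o" + (digits or "0")

assert to_octal(216) == oct(216) == "0o330"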
class _a :
"""simple docstring"""
    def __init__( self , graph , sources , sinks ):
        self.source_index = None
        self.sink_index = None
        self.graph = graph
        self._normalize_graph(sources , sinks )
        self.verticies_count = len(graph )
        self.maximum_flow_algorithm = None
    def _normalize_graph( self , sources , sinks ):
        if isinstance(sources , int ):
            sources = [sources]
        if isinstance(sinks , int ):
            sinks = [sinks]
if len(UpperCAmelCase ) == 0 or len(UpperCAmelCase ) == 0:
return
        self.source_index = sources[0]
        self.sink_index = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(UpperCAmelCase ) > 1 or len(UpperCAmelCase ) > 1:
            max_input_flow = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
            size = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0
            size = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1
    def find_maximum_flow( self ):
if self.maximum_flow_algorithm is None:
raise Exception("You need to set maximum flow algorithm before." )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
    def set_maximum_flow_algorithm( self , algorithm ):
        self.maximum_flow_algorithm = algorithm(self )
class _a :
"""simple docstring"""
    def __init__( self , flow_network ):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False
    def execute( self ):
        if not self.executed:
            self._algorithm()
            self.executed = True
    def _algorithm( self ):
        pass
class _a ( snake_case_ ):
"""simple docstring"""
    def __init__( self , flow_network ):
        super().__init__(flow_network )
        # use this to save your result
        self.maximum_flow = -1
    def getMaximumFlow( self ):
if not self.executed:
raise Exception("You should execute algorithm before using its result!" )
return self.maximum_flow
class _a ( snake_case_ ):
"""simple docstring"""
    def __init__( self , flow_network ):
        super().__init__(flow_network )
        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count )]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count
    def _algorithm( self ):
        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth
        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count )
            if i != self.source_index and i != self.sink_index
        ]
        # move through list
        i = 0
        while i < len(vertices_list ):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index )
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0 , vertices_list.pop(i ) )
                i = 0
            else:
                i += 1
        self.maximum_flow = sum(self.preflow[self.source_index] )
    def process_vertex( self , vertex_index ):
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
                    self.push(vertex_index , neighbour_index )
            self.relabel(vertex_index )
    def push( self , from_index , to_index ):
        preflow_delta = min(
            self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
    def relabel( self , vertex_index ):
        min_height = None
        for to_index in range(self.verticies_count ):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]
        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
__a :Tuple = [0]
__a :Tuple = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__a :List[str] = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
__a :List[str] = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__a :List[Any] = flow_network.find_maximum_flow()
print(F"maximum flow is {maximum_flow}") | 86 | 0 |
from jiwer import compute_measures
import datasets
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n    references: List of references for each speech input.\n    predictions: List of transcriptions to score.\n    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n    (float): the word error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> wer = datasets.load_metric("wer")\n    >>> wer_score = wer.compute(predictions=predictions, references=references)\n    >>> print(wer_score)\n    0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Value("string", id="sequence"),
"references": datasets.Value("string", id="sequence"),
}), codebase_urls=["https://github.com/jitsi/jiwer/"], reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
], )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]

            return incorrect / total
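# Minimal usage sketch (added; assumes the `jiwer` package is installed and
# mirrors the docstring example above):
#
#   >>> wer = WER()
#   >>> wer._compute(
#   ...     predictions=["this is the prediction", "there is an other sample"],
#   ...     references=["this is the reference", "there is another one"],
#   ... )
#   0.5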
| 500 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xglm'] = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xglm_fast'] = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xglm'] = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_xglm'] = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xglm'] = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
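# How the lazy pattern behaves at runtime (added, illustrative): importing the
# package is cheap, and the first attribute access triggers the real import.
#
#   >>> from transformers.models import xglm   # nothing heavy is imported yet
#   >>> config = xglm.XGLMConfig()              # now configuration_xglm loads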
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure) | 86 | 0 |
"""simple docstring"""
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ['l', 'r']:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ['t', 'b']:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode='linear_ramp', pad_width=overlap_pixels, end_values=0)

    if 'l' in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if 'r' in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if 't' in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if 'b' in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
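# Illustration (added): for size=(8, 8) and overlap_pixels=2, the inner 4x4
# block of the mask is fully opaque (255) and np.pad(..., mode='linear_ramp',
# end_values=0) fades the 2-pixel border linearly to 0, so adjacent upscaled
# tiles blend into each other instead of producing visible seams.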
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))
def clamp_rect(rect: [int], min: [int], max: [int]):
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )
def add_overlap_rect(rect: [int], overlap: int, image_size: [int]):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect
def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new('RGB', (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result
def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile
def next_divisible(n, d):
    divisor = n % d
    return n - divisor
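# Worked example (added): next_divisible(130, 8) == 128 -- the remainder is
# dropped so lengths snap down to the nearest multiple of d.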
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    """A StableDiffusionUpscalePipeline that upscales a large image tile by tile."""

    def __init__(self, vae, text_encoder, tokenizer, unet, low_res_scheduler, scheduler, max_noise_level=350):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )
    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append('l')
        elif crop_rect[2] == image.size[0]:
            remove_borders.append('r')
        if y == 0:
            remove_borders.append('t')
        elif crop_rect[3] == image.size[1]:
            remove_borders.append('b')
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode='L',
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )
    @torch.no_grad()
    def __call__(
        self,
        prompt,
        image,
        num_inference_steps=75,
        guidance_scale=9.0,
        noise_level=50,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        callback=None,
        callback_steps=1,
        tile_size=128,
        tile_border=32,
        original_image_slice=32,
    ):
        final_image = Image.new('RGB', (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({'progress': current_count / total_tile_count, 'image': final_image})
        return final_image
def main():
    # Run a demo
    model_id = 'stabilityai/stable-diffusion-x4-upscaler'
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision='fp16', torch_dtype=torch.float16)
    pipe = pipe.to('cuda')
    image = Image.open('../../docs/source/imgs/diffusers_library.jpg')

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj['image'].save('diffusers_library_progress.jpg')

    final_image = pipe(image=image, prompt='Black font, white background, vector', noise_level=40, callback=callback)
    final_image.save('diffusers_library.jpg')
if __name__ == "__main__":
main()
| 437 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
"""simple docstring"""
    texts = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
    scores = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
    pair = f'''{src_lang}-{tgt_lang}'''
    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation.
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
print(f'''Generating {path}''' )
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / 'model_cards'
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split('-')
    model_card_dir = model_cards_dir / 'facebook' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang) | 86 | 0 |
def permute(nums: list[int]) -> list[list[int]]:
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result
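# Example (added): permute([1, 2]) returns [[2, 1], [1, 2]] -- each element is
# popped in turn, the remaining list is permuted recursively, and the popped
# element is appended back to every sub-permutation.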
def permute2(nums):
    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[start], nums[i] = nums[i], nums[start]
                backtrack(start + 1)
                nums[start], nums[i] = nums[i], nums[start]  # backtrack

    output = []
    backtrack(0)
    return output
if __name__ == "__main__":
import doctest
    # use res to print the data from the permute2 function
    res = permute2([1, 2, 3])
    print(res)
doctest.testmod()
| 194 |
from ..utils import DummyObject, requires_backends
class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionUpscalePipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class StableDiffusionOnnxPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"]) | 86 | 0 |
def solution(n: int = 600_851_475_143) -> int:
    """Return the largest prime factor of n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
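# Worked example (added): 13195 = 5 * 7 * 13 * 29, so solution(13195) == 29.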
if __name__ == "__main__":
print(F"{solution() = }")
| 321 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0_001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00_979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
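        # Added note: the expected values above follow the DDPM "fixed_small"
        # posterior variance beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t).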
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1_153.1_833) < 1e-2
        assert abs(result_mean.item() - 0.5_005) < 1e-3
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9_606) < 1e-2
        assert abs(result_mean.item() - 0.3_372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0_296) < 1e-2
        assert abs(result_mean.item() - 0.2_631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps) | 86 | 0 |
'''simple docstring'''
import argparse
import json
from tqdm import tqdm
def main() -> None:
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        '--src_path', type=str, default='biencoder-nq-dev.json', help='Path to raw DPR training data',
    )
    parser.add_argument(
        '--evaluation_set', type=str, help='where to store parsed evaluation_set file',
    )
    parser.add_argument(
        '--gold_data_path', type=str, help='where to store parsed gold_data_path file',
    )
    args = parser.parse_args()

    with open(args.src_path, 'r') as src_file, open(args.evaluation_set, 'w') as eval_file, open(
        args.gold_data_path, 'w'
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record['question']
            contexts = [context['title'] for context in dpr_record['positive_ctxs']]
            eval_file.write(question + '\n')
            gold_file.write('\t'.join(contexts) + '\n')
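# Example invocation (added; the file names are placeholders):
#
#   python parse_dpr_relevance_data.py \
#       --src_path biencoder-nq-dev.json \
#       --evaluation_set eval.questions \
#       --gold_data_path gold.tsv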
if __name__ == "__main__":
main()
| 42 |
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index

    return entity_vocab
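# Expected entity_vocab.tsv layout (added, illustrative) -- one tab-separated
# pair per line; the second column is ignored and the line index becomes the id:
#
#   [PAD]	0
#   [UNK]	1
#   Ana Ivanovic	2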
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
) | 86 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
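# Sketch of the expected fairseq dict.txt format (added, illustrative) -- one
# "<token> <count>" pair per line, e.g.
#
#   the 1234
#   a 999
#
# which yields {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "the": 4, "a": 5}.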
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=10_224, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 342 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = 'true'
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
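# Note (added): gather_for_metrics() strips the duplicate samples that the
# distributed sampler pads onto the final batch, so len(logits) matches the
# true dataset length instead of a multiple of the process count.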
def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f'''Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}'''
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''')
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''')
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main() | 86 | 0 |
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def str_eval(s: str) -> int:
    """Return the product of the digits in the string s."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product
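# Example (added): str_eval("937") == 9 * 3 * 7 == 189.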
def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits in n with the greatest product."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''')
| 79 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_TASK_GUIDES = 'docs/source/en/tasks'
def _find_text_in_file(filename, start_prompt, end_prompt):
    """
    Find the text in `filename` between a line beginning with `start_prompt` and before `end_prompt`, removing empty
    lines.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task(task_guide):
    """
    Return the list of models supporting a given task.
    """
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f'''[{name}](../model_doc/{code})''' for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """
    For a given task guide, checks the model list in the generated tip for consistency and updates it if needed.
    """
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )

    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'''
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite) | 86 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_chinese_clip': [
'CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ChineseCLIPConfig',
'ChineseCLIPOnnxConfig',
'ChineseCLIPTextConfig',
'ChineseCLIPVisionConfig',
],
'processing_chinese_clip': ['ChineseCLIPProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ['ChineseCLIPFeatureExtractor']
    _import_structure["image_processing_chinese_clip"] = ['ChineseCLIPImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
'CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ChineseCLIPModel',
'ChineseCLIPPreTrainedModel',
'ChineseCLIPTextModel',
'ChineseCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 140 |
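The `_LazyModule` indirection above keeps `import transformers.models.chinese_clip` cheap: torch- and vision-dependent submodules are only imported when one of their exported names is first accessed. A toy reimplementation of the idea (not the actual `_LazyModule`, which also handles the file path and module spec arguments seen above):

```python
import importlib
import types


class ToyLazyModule(types.ModuleType):
    """Resolve exported names to their defining submodules on first attribute access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported symbol back to the submodule that defines it.
        self._class_to_module = {sym: mod for mod, syms in import_structure.items() for sym in syms}

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value
```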
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__a :Dict = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """simple docstring"""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """simple docstring"""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """simple docstring"""
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
# size of the architecture
if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
# load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__a :str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__a :Optional[int] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path) | 86 | 0 |
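A standalone sketch of the query/key/value split performed by `read_in_q_k_v` above: timm stores each block's attention input projection as one fused `(3 * hidden, hidden)` matrix, and the converter slices it into the three `(hidden, hidden)` blocks the HF layout expects (sizes here are toy values):

```python
import torch

hidden_size = 8
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # fused q/k/v, as in timm

query = in_proj_weight[:hidden_size, :]
key = in_proj_weight[hidden_size : hidden_size * 2, :]
value = in_proj_weight[-hidden_size:, :]

# Concatenating the slices back recovers the fused matrix exactly.
assert torch.equal(torch.cat([query, key, value]), in_proj_weight)
```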
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name
class __UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
    def __init__(self, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor, ):
        super().__init__()
        self.register_modules(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, )
    def enable_attention_slicing(self, slice_size="auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)
    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, latents_reference=None, text_embeddings=None, **kwargs, ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}.")
# get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}")
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}.")
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`.")
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device)
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype)
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)
        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        # import pdb
        # pdb.set_trace()
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
        self.scheduler.set_timesteps(num_inference_steps)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device)
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype))
        else:
            has_nsfw_concept = None
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image, has_nsfw_concept)
        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
| 99 |
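A worked instance of the latent re-centering arithmetic in the pipeline above, with assumed sizes (a 512x512 reference generation being placed inside a 768-tall, 512-wide target; latent dims are `(batch, channels, height // 8, width // 8)`):

```python
latents_shape = (1, 4, 96, 64)            # target latents for a 768x512 image
latents_shape_reference = (1, 4, 64, 64)  # reference latents from a 512x512 image

dx = (latents_shape[3] - latents_shape_reference[3]) // 2  # 0: widths match
dy = (latents_shape[2] - latents_shape_reference[2]) // 2  # 16: centre vertically
w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx  # 64
h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy  # 64

# The 64x64 reference block is copied into the target latents at row offset 16,
# so the same seed reproduces a similar composition at the larger size.
```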
def solution(length: int = 50) -> int:
    """simple docstring"""
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]
if __name__ == "__main__":
print(F"{solution() = }") | 86 | 0 |
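The triple loop implements the tiling recurrence of Project Euler problem 117: a row of `length` unit squares filled with black unit tiles plus coloured tiles of length two, three and four. A quick sanity check against the value quoted in the problem statement:

```python
# Problem 117 states that a row measuring five units admits exactly fifteen tilings.
assert solution(5) == 15
```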
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"
class SCREAMING_SNAKE_CASE__ ( snake_case_ ):
    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ):
        """simple docstring"""
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=requires_safety_checker, )
        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)
    @property
    def components(self):
        """simple docstring"""
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        """simple docstring"""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)
    def disable_attention_slicing(self):
        """simple docstring"""
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def text2img_sd1_1(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        """simple docstring"""
        return self.pipe1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
    @torch.no_grad()
    def text2img_sd1_2(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        """simple docstring"""
        return self.pipe2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
    @torch.no_grad()
    def text2img_sd1_3(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        """simple docstring"""
        return self.pipe3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
    @torch.no_grad()
    def text2img_sd1_4(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        """simple docstring"""
        return self.pipe4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        """simple docstring"""
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")
        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
| 69 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
__a :List[str] = logging.get_logger(__name__)
@add_end_docstrings(snake_case_ )
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Any , **UpperCAmelCase : List[str] ):
super().__init__(**UpperCAmelCase )
if self.framework == "tf":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , "vision" )
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)
    def __call__(self, image: Union[str, "Image.Image", List[Dict[str, Any]]], candidate_labels: Union[str, List[str]] = None, **kwargs, ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")
        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results
    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params
    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int64)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")
        outputs = self.model(**model_inputs)
        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"])[0]
            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])
                results.append({"score": score, "label": label, "box": box})
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results
    def _get_bounding_box(self, box: "torch.Tensor"):
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
return bbox | 86 | 0 |
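For context, this pipeline is normally reached through the high-level `pipeline` factory; `google/owlvit-base-patch32` is one commonly used zero-shot detection checkpoint:

```python
from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
    threshold=0.1,
)
# -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]
```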
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 118 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
__a :Any = logging.get_logger(__name__)
__a :int = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
__a :Tuple = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : List[Any] ,__UpperCamelCase : Dict ,__UpperCamelCase : Any ,__UpperCamelCase : Optional[int] ):
"""simple docstring"""
for attribute in key.split("." ):
A_ = getattr(__UpperCamelCase ,__UpperCamelCase )
if weight_type is not None:
A_ = getattr(__UpperCamelCase ,__UpperCamelCase ).shape
else:
A_ = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
A_ = value
elif weight_type == "weight_g":
A_ = value
elif weight_type == "weight_v":
A_ = value
elif weight_type == "bias":
A_ = value
else:
A_ = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ = []
A_ = fairseq_model.state_dict()
A_ = hf_model.feature_extractor
for name, value in fairseq_dict.items():
A_ = False
if "conv_layers" in name:
load_conv_layer(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,hf_model.config.feat_extract_norm == "group" ,)
A_ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
A_ = True
if "*" in mapped_key:
A_ = name.split(__UpperCamelCase )[0].split("." )[-2]
A_ = mapped_key.replace("*" ,__UpperCamelCase )
if "weight_g" in name:
A_ = "weight_g"
elif "weight_v" in name:
A_ = "weight_v"
elif "bias" in name and "relative_attention_bias" not in name:
A_ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
A_ = "weight"
else:
A_ = None
set_recursively(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
continue
if not is_used:
unused_weights.append(__UpperCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Dict ,__UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Optional[int] ):
"""simple docstring"""
A_ = full_name.split("conv_layers." )[-1]
A_ = name.split("." )
A_ = int(items[0] )
A_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
A_ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__UpperCamelCase )
@torch.no_grad()
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : str ,__UpperCamelCase : int=None ):
"""simple docstring"""
A_ = torch.load(__UpperCamelCase )
A_ = WavLMConfigOrig(checkpoint["cfg"] )
A_ = WavLMOrig(__UpperCamelCase )
model.load_state_dict(checkpoint["model"] )
model.eval()
if config_path is not None:
A_ = WavLMConfig.from_pretrained(__UpperCamelCase )
else:
A_ = WavLMConfig()
A_ = WavLMModel(__UpperCamelCase )
recursively_load_weights(__UpperCamelCase ,__UpperCamelCase )
hf_wavlm.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :List[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
__a :Optional[int] = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 86 | 0 |
import os
from datetime import datetime as dt
from github import Github
_snake_case = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main():
    '''simple docstring'''
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")
    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main()
| 500 |
def bubble_sort(list_data: list, length: int = 0) -> list:
    """simple docstring"""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 | 0 |
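Example runs of the recursive variant above; the recursion bottoms out as soon as a full pass makes no swap:

```python
assert bubble_sort([0, 5, 2, 3, 2]) == [0, 2, 2, 3, 5]
assert bubble_sort([-2, -45, -5]) == [-45, -5, -2]
assert bubble_sort([]) == []
```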
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    '''simple docstring'''
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 437 |
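A worked example of the greedy computation on the classic instance (values 60/100/120, weights 10/20/30, capacity 50): items one and two are taken whole plus two thirds of item three, for a total of 160 + 80 = 240:

```python
assert frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0
```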
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class _a ( unittest.TestCase ):
"""simple docstring"""
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
AcceleratorState._reset_state() | 86 | 0 |
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """simple docstring"""
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex id : edge weight}
    def __lt__(self, other):
        return self.key < other.key
    def __repr__(self):
        return self.id
    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)
    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight
def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)
def prim(graph, root):
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a
def prim_heap(graph, root):
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)
    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
def test_vector():
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 194 |
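A quick way to exercise both implementations above (names as restored in this row; the returned pairs are 1-indexed `(vertex, parent)` edges of the minimum spanning tree):

```python
# Weighted triangle: 1-2 (weight 1), 2-3 (weight 2), 1-3 (weight 3).
graph = [Vertex(i) for i in range(1, 4)]
connect(graph, 1, 2, 1)
connect(graph, 2, 3, 2)
connect(graph, 1, 3, 3)

print(prim(graph, graph[0]))             # [(2, 1), (3, 2)]
print(list(prim_heap(graph, graph[0])))  # [(2, 1), (3, 2)]
```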
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__a :List[str] = logging.get_logger(__name__)
__a :Optional[int] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
__a :Any = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
for attribute in key.split("." ):
A_ = getattr(__UpperCamelCase ,__UpperCamelCase )
if weight_type is not None:
A_ = getattr(__UpperCamelCase ,__UpperCamelCase ).shape
else:
A_ = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
A_ = value
elif weight_type == "weight_g":
A_ = value
elif weight_type == "weight_v":
A_ = value
elif weight_type == "bias":
A_ = value
else:
A_ = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Dict ):
"""simple docstring"""
A_ = []
A_ = fairseq_model.state_dict()
A_ = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
A_ = None
for name, value in fairseq_dict.items():
A_ = False
if "conv_layers" in name:
load_conv_layer(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,hf_model.config.feat_extract_norm == "group" ,)
A_ = True
elif name.split("." )[0] == "proj":
A_ = fairseq_model.proj
A_ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
A_ = True
if "*" in mapped_key:
A_ = name.split(__UpperCamelCase )[0].split("." )[-2]
A_ = mapped_key.replace("*" ,__UpperCamelCase )
if "weight_g" in name:
A_ = "weight_g"
elif "weight_v" in name:
A_ = "weight_v"
elif "bias" in name:
A_ = "bias"
elif "weight" in name:
A_ = "weight"
else:
A_ = None
set_recursively(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
continue
if not is_used:
unused_weights.append(__UpperCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
return proj_weight
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : int ,__UpperCamelCase : int ,__UpperCamelCase : Any ):
"""simple docstring"""
A_ = full_name.split("conv_layers." )[-1]
A_ = name.split("." )
A_ = int(items[0] )
A_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
A_ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
A_ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ , A_ = emb.weight.shape
A_ = nn.Linear(__UpperCamelCase ,__UpperCamelCase ,bias=__UpperCamelCase )
A_ = emb.weight.data
return lin_layer
def __snake_case ( __UpperCamelCase : Tuple ):
"""simple docstring"""
with open(__UpperCamelCase ,"r" ,encoding="utf-8" ) as f:
A_ = f.readlines()
A_ = [line.split(" " )[0] for line in lines]
A_ = len(__UpperCamelCase )
A_ = {
"<s>": 0,
"<pad>": 1,
"</s>": 2,
"<unk>": 3,
}
vocab_dict.update(dict(zip(__UpperCamelCase ,range(4 ,num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Any ,__UpperCamelCase : List[Any] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str] ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Dict ,):
"""simple docstring"""
A_ = WavaVecaConfig.from_pretrained(__UpperCamelCase )
A_ = SpeechaTextaConfig.from_pretrained(
__UpperCamelCase ,vocab_size=__UpperCamelCase ,decoder_layers=__UpperCamelCase ,do_stable_layer_norm=__UpperCamelCase )
A_ = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=1_6000 ,padding_value=0 ,do_normalize=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,)
A_ , A_ , A_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
A_ = model[0].eval()
# set weights for wav2vec2 encoder
A_ = WavaVecaModel(__UpperCamelCase )
A_ = recursively_load_weights_wavaveca(model.encoder ,__UpperCamelCase )
A_ = SpeechaTextaForCausalLM(__UpperCamelCase )
A_ , A_ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() ,strict=__UpperCamelCase )
# set output linear layer
unexpected_keys.remove("embed_out" )
A_ = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
A_ = SpeechEncoderDecoderModel(encoder=__UpperCamelCase ,decoder=__UpperCamelCase )
A_ = False
# add projection layer
A_ = nn.Parameter(projection_layer.weight )
A_ = nn.Parameter(projection_layer.bias )
A_ = create_vocab_dict(__UpperCamelCase )
with open(os.path.join(__UpperCamelCase ,"vocab.json" ) ,"w" ) as fp:
json.dump(__UpperCamelCase ,__UpperCamelCase )
A_ = SpeechaTextaTokenizer(os.path.join(__UpperCamelCase ,"vocab.json" ) )
tokenizer.save_pretrained(__UpperCamelCase )
A_ = hf_wavavec.config.to_dict()
A_ = tokenizer.pad_token_id
A_ = tokenizer.bos_token_id
A_ = tokenizer.eos_token_id
A_ = "speech_to_text_2"
A_ = "wav2vec2"
A_ = SpeechEncoderDecoderConfig.from_dict(__UpperCamelCase )
hf_wavavec.save_pretrained(__UpperCamelCase )
feature_extractor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :int = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_0224, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
__a :Tuple = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
) | 86 | 0 |
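A standalone sketch of the embedding-to-LM-head tying done by the conversion above (the helper that builds `lin_layer` from `emb.weight`): an `nn.Embedding` weight of shape `(vocab, hidden)` is exactly the weight shape of a bias-free `Linear(hidden, vocab)`, so the tensor can be shared directly (toy sizes):

```python
import torch
from torch import nn

emb = nn.Embedding(10, 4)               # weight shape (vocab=10, hidden=4)
lm_head = nn.Linear(4, 10, bias=False)  # weight shape (10, 4) as well
lm_head.weight.data = emb.weight.data   # tie the parameters

assert torch.equal(lm_head.weight, emb.weight)
```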
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class snake_case ( unittest.TestCase ):
"""simple docstring"""
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image
    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision)
        return model, params
    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.23_23, -0.13_04, 0.08_13, -0.30_93, -0.09_19, -0.15_71, -0.11_25, -0.58_06]],
[17, 0.55, [-0.08_31, -0.24_43, 0.09_01, -0.09_19, 0.33_96, 0.01_03, -0.37_43, 0.07_01]],
[8, 0.89, [-0.48_63, 0.08_59, 0.08_75, -0.16_58, 0.91_99, -0.01_14, 0.48_39, 0.46_39]],
[3, 1000, [-0.56_49, 0.24_02, -0.55_18, 0.12_48, 1.13_28, -0.24_43, -0.03_25, -1.00_78]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)
        sample = model.apply(
            {"params": params}, latents, jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=encoder_hidden_states, ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[83, 4, [0.15_14, 0.08_07, 0.16_24, 0.10_16, -0.18_96, 0.02_63, 0.06_77, 0.23_10]],
[17, 0.55, [0.11_64, -0.02_16, 0.01_70, 0.15_89, -0.31_20, 0.10_05, -0.05_81, -0.14_58]],
[8, 0.89, [-0.17_58, -0.01_69, 0.10_04, -0.14_11, 0.13_12, 0.11_03, -0.19_96, 0.21_39]],
[3, 1000, [0.12_14, 0.03_52, -0.07_31, -0.15_62, -0.09_94, -0.09_06, -0.23_40, -0.05_39]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)
        sample = model.apply(
            {"params": params}, latents, jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=encoder_hidden_states, ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
| 321 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__a :str = logging.get_logger(__name__)
__a :Any = Dict[str, Any]
__a :int = List[Prediction]
@add_end_docstrings(snake_case_ )
class _a ( snake_case_ ):
"""simple docstring"""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, "vision")
        self.check_model_type(
            dict(list(MODEL_FOR_OBJECT_DETECTION_MAPPING.items()) + list(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())))
    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)
    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold
            ]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]
            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation
    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
| 86 | 0 |
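# Usage sketch (added for illustration, not from the original file): this class
# backs `pipeline("object-detection")`; the checkpoint below is one public
# example, not the only option.
#
#     from transformers import pipeline
#
#     detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#     results = detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
#     # -> [{"score": 0.99, "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]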
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    """
    Pipeline for unconditional audio generation from a UNet and a scheduler.
    """

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
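# Usage sketch (added for illustration; the pipeline name matches the restored
# class above and the checkpoint id is an assumption, pick any compatible one):
#
#     from diffusers import DanceDiffusionPipeline
#
#     pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#     audio = pipe(audio_length_in_s=4.0, num_inference_steps=100).audios[0]
#     # `audio` is a numpy array of shape (channels, samples) with values in [-1, 1]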
| 42 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    """
    A pipeline for image super-resolution using latent diffusion with a VQ-VAE and a UNet.
    """

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 86 | 0 |
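# Usage sketch (added for illustration; the checkpoint id is an example of a
# compatible VQVAE+UNet pair, not a requirement of the class):
#
#     from diffusers import LDMSuperResolutionPipeline
#     from PIL import Image
#
#     pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#     low_res = Image.open("low_res.png").convert("RGB")
#     upscaled = pipe(image=low_res, num_inference_steps=100, eta=1.0).images[0]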
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class TapasConfig(PretrainedConfig):
    model_type = "tapas"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10], initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, positive_label_weight=10.0, num_aggregation_labels=0, aggregation_loss_weight=1.0, use_answer_as_supervision=None, answer_loss_importance=1.0, use_normalized_answer_loss=False, huber_loss_delta=None, temperature=1.0, aggregation_temperature=1.0, use_gumbel_for_cells=False, use_gumbel_for_aggregation=False, average_approximation_function="ratio", cell_selection_preference=None, answer_loss_cutoff=None, max_num_rows=64, max_num_columns=32, average_logits_per_cell=False, select_one_column=True, allow_empty_column_selection=False, init_cell_selection_weights_to_zero=False, reset_position_index_per_cell=True, disable_per_token_loss=False, aggregation_labels=None, no_aggregation_label_index=None, **kwargs) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
| 342 |
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]


def next_number(number: int) -> int:
    """
    Returns the next number of the square-digit chain.
    """
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000

    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True  # 1 is the single member of the chain that ends with 1
CHAINS[57] = False  # 58 is a member of the chain that ends with 89


def chain(number: int) -> bool:
    """
    Returns True if the chain of `number` ends with 1, False if it ends with 89.
    """
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    """
    Returns the number of starting values below `number` whose chains arrive at 89.
    """
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
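# Worked example (added for illustration): 44 -> 16 + 16 = 32 -> 9 + 4 = 13
# -> 1 + 9 = 10 -> 1, so chain(44) is True; 85 -> 64 + 25 = 89, so chain(85)
# is False and 85 is one of the numbers counted by solution().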
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{solution() = }") | 86 | 0 |
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """
    Count set bits by repeatedly clearing the lowest set bit.
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """
    Count set bits by checking the last bit and shifting right.
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """
    Benchmark both implementations on a few sample numbers.
    """

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})", setup=setup
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
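# Example (added for illustration): 25 == 0b11001 has three set bits, so both
# counters return 3; Brian Kernighan's loop runs once per set bit (3 iterations)
# while the modulo/shift loop runs once per bit position (5 iterations).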
| 79 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 86 | 0 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create universe of discourse in Python using linspace()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
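    # Sanity-check sketch (added; assumes fuzzy_or/fuzzy_and return a
    # (universe, membership) pair, hence the [1] indexing above): the results
    # should agree with the elementwise definitions given in the comments.
    assert np.allclose(union, np.maximum(young, middle_aged))
    assert np.allclose(intersection, np.minimum(young, middle_aged))
    assert np.allclose(complement_a, 1 - young)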
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("Young")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("Middle aged")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("union")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("intersection")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("complement_a")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("difference a/b")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("alg_sum")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("alg_product")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("bdd_sum")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("bdd_difference")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 140 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}
    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return jnp.stack(column, axis=0)
        return column
    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
return batch | 86 | 0 |
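# Usage sketch (added for illustration): this formatter backs the "jax" format
# of `datasets`, e.g.
#
#     from datasets import Dataset
#
#     ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("jax")
#     ds[0]["x"]  # a jax.Array placed on the formatter's default device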
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {'configuration_swin': ['SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwinConfig', 'SwinOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
'SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwinForImageClassification',
'SwinForMaskedImageModeling',
'SwinModel',
'SwinPreTrainedModel',
'SwinBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
'TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSwinForImageClassification',
'TFSwinForMaskedImageModeling',
'TFSwinModel',
'TFSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 99 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever(RagRetriever):
    """
    A distributed retriever built on top of RagRetriever: retrieval runs only
    on the main worker and results are scattered to the other workers.
    """

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None
    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)
    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname
    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int):
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
| 86 | 0 |
'''simple docstring'''
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """
    Change the brightness of a PIL Image to a given level.
    """

    def brightness(c: int) -> float:
        """
        Fundamental transformation that is performed on every pixel value.
        """
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
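# Note (added for illustration): per pixel, 128 + level + (c - 128) reduces to
# c + level; PIL's point() then clamps results to the valid 0..255 range, so
# e.g. level=100 lifts every channel by 100 up to white.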
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
        bright_img = change_brightness(img, 100)
    bright_img.save('''image_data/lena_brightness.png''', format='''png''')
| 69 |
from jiwer import compute_measures
import datasets
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
    """Word error rate metric backed by jiwer."""

    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] , )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total | 86 | 0 |
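# Worked example (added for illustration): for reference "this is the reference"
# vs prediction "this is the prediction" there is 1 substitution, 0 deletions,
# 0 insertions and 3 hits, so WER = (1 + 0 + 0) / (1 + 0 + 3) = 0.25 for that pair.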
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation used to normalize image embeddings.
    """

    @register_to_config
    def __init__(self, embedding_dim: int = 768) -> None:
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(self, torch_device: Optional[Union[str, torch.device]] = None, torch_dtype: Optional[torch.dtype] = None):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
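# Round-trip sketch (added for illustration; the class name follows the rewrite
# above): unscale inverts scale, since ((x - mean) / std) * std + mean == x for
# non-zero std.
#
#     normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
#     x = torch.randn(2, 768)
#     assert torch.allclose(normalizer.unscale(normalizer.scale(x)), x, atol=1e-6)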
| 118 |
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None
    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1
    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.get_maximum_flow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.vertices_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # you should override this method in subclasses
    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def get_maximum_flow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.vertices_count for i in range(self.vertices_count)]

        self.heights = [0] * self.vertices_count
        self.excesses = [0] * self.vertices_count

    def _algorithm(self):
        self.heights[self.source_index] = self.vertices_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.vertices_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.vertices_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.vertices_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
__a :Tuple = [0]
__a :Tuple = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
    maximum_flow = flow_network.find_maximum_flow()
print(F"maximum flow is {maximum_flow}") | 86 | 0 |
def greatest_common_divisor(a: int, b: int) -> int:
    """
    Calculate the Greatest Common Divisor (GCD) recursively.
    """
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """
    Calculate the Greatest Common Divisor (GCD) iteratively.
    """
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main():
    """
    Read two integers and print their GCD computed both ways.
    """
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")
if __name__ == "__main__":
main()
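# Quick check (added for illustration): both implementations agree on simple
# cases, e.g. greatest_common_divisor(24, 40) == 8 and gcd_by_iterative(24, 40)
# == 8; when a == 0 the recursive version correctly returns abs(b).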
| 500 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 86 | 0 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 87 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    """
    Configuration class for GPT-1 style models.
    """

    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=40478, n_positions=512, n_embd=768, n_layer=12, n_head=12, afn="gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, **kwargs) -> None:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
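# Note (added for illustration): via `attribute_map` the canonical config names
# alias the GPT-1 ones, e.g. OpenAIGPTConfig().hidden_size resolves to
# OpenAIGPTConfig().n_embd, which defaults to 768.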
| 87 | 1 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
_lowerCamelCase : int = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
_lowerCamelCase : Any = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10), f"Recall: {recall * 100:.2f}", f" Precision: {precision * 100:.2f}", f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric):
    """CoVal coreference evaluation metric."""

    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''')),
'''references''': datasets.Sequence(datasets.Value('''string''')),
}) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
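

if __name__ == "__main__":
    # Minimal sketch (not part of the metric above): the `conll_score` it reports
    # is just the arithmetic mean of the MUC, B-cubed and CEAF-e F1 values,
    # scaled to a percentage. The F1 values below are made up for illustration.
    muc_f1, bcub_f1, ceafe_f1 = 0.80, 0.75, 0.70
    print(f"CoNLL score: {(muc_f1 + bcub_f1 + ceafe_f1) / 3 * 100:.2f}")  # CoNLL score: 75.00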
| 87 |
def or_gate(input_1: int, input_2: int) -> int:
    """Return 1 if at least one input is 1, else 0 (logical OR)."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Exhaustively check the two-input OR truth table."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 87 | 1 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = """\
"""
_DESCRIPTION = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
_KWARGS_DESCRIPTION = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # exponentiated mean token-level negative log-likelihood per sequence
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
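

if __name__ == "__main__":
    # Toy illustration (not part of the metric above): the shift-by-one
    # bookkeeping means the logits at position t score the token at position
    # t + 1. Random logits stand in for a model, so the value is meaningless,
    # but the tensor plumbing is the same.
    torch.manual_seed(0)
    vocab_size, seq_len = 10, 6
    logits = torch.randn(1, seq_len, vocab_size)         # (batch, seq, vocab)
    tokens = torch.randint(0, vocab_size, (1, seq_len))  # (batch, seq)

    shift_logits = logits[:, :-1, :]  # predictions for positions 1..seq_len-1
    shift_labels = tokens[:, 1:]      # the tokens those positions should predict

    nll = CrossEntropyLoss(reduction="none")(shift_logits.transpose(1, 2), shift_labels)
    print(torch.exp(nll.mean(1)))     # per-sequence perplexity of the toy "model"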
| 87 |
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    """
    A helper function to set a given tensor (parameter or buffer) of a module on a specific device
    (note that doing `param.to(device)` is not supported by `bitsandbytes`).
    """
    # Recurse through dotted tensor names to reach the owning submodule.
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    """
    Private method that wraps the recursion for module replacement.
    Returns the converted model and a boolean indicating whether any module was converted.
    """
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    """Replace all `nn.Linear`/`Conv1D` modules with bitsandbytes quantized layers, except those listed in `modules_to_not_convert`."""
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    """
    Return the names of modules to keep in full precision (e.g. the lm_head of causal LMs,
    which is often tied to the embeddings and sensitive to quantization).
    """
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
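

if __name__ == "__main__":
    # Minimal sketch of what the replacement above amounts to on a toy module,
    # hard-coding the llm_int8 branch and skipping the quantization config.
    # Assumes `bitsandbytes` is installed and a CUDA device is available for
    # actually running the quantized layer; the module names are illustrative.
    class TinyLM(nn.Module):
        def __init__(self):
            super().__init__()
            self.proj = nn.Linear(16, 16)
            self.lm_head = nn.Linear(16, 100)  # kept in full precision, as above

    tiny = TinyLM()
    for name, module in tiny.named_children():
        if isinstance(module, nn.Linear) and name != "lm_head":
            tiny._modules[name] = bnb.nn.Linear8bitLt(
                module.in_features,
                module.out_features,
                module.bias is not None,
                has_fp16_weights=False,
                threshold=6.0,
            )
    print(type(tiny.proj).__name__)  # Linear8bitLt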
| 87 | 1 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"""},
"""merges_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"""},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""ctrl""": 256,
}
CONTROL_CODES = {
"""Pregnancy""": 168629,
"""Christianity""": 7675,
"""Explain""": 106423,
"""Fitness""": 63440,
"""Saving""": 63163,
"""Ask""": 27171,
"""Ass""": 95985,
"""Joke""": 163509,
"""Questions""": 45622,
"""Thoughts""": 49605,
"""Retail""": 52342,
"""Feminism""": 164338,
"""Writing""": 11992,
"""Atheism""": 192263,
"""Netflix""": 48616,
"""Computing""": 39639,
"""Opinion""": 43213,
"""Alone""": 44967,
"""Funny""": 58917,
"""Gaming""": 40358,
"""Human""": 4088,
"""India""": 1331,
"""Joker""": 77138,
"""Diet""": 36206,
"""Legal""": 11859,
"""Norman""": 4939,
"""Tip""": 72689,
"""Weight""": 52343,
"""Movies""": 46273,
"""Running""": 23425,
"""Science""": 2090,
"""Horror""": 37793,
"""Confession""": 60572,
"""Finance""": 12250,
"""Politics""": 16360,
"""Scary""": 191985,
"""Support""": 12654,
"""Technologies""": 32516,
"""Teenage""": 66160,
"""Event""": 32769,
"""Learned""": 67460,
"""Notion""": 182770,
"""Wikipedia""": 37583,
"""Books""": 6665,
"""Extract""": 76050,
"""Confessions""": 102701,
"""Conspiracy""": 75932,
"""Links""": 63674,
"""Narcissus""": 150425,
"""Relationship""": 54766,
"""Relationships""": 134796,
"""Reviews""": 41671,
"""News""": 4256,
"""Translation""": 26820,
"""multilingual""": 128406,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, where the word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
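

if __name__ == "__main__":
    # Illustration (not part of the tokenizer): adjacent symbol pairs for a
    # word that already carries the "</w>" end-of-word marker used by the BPE
    # loop below. Set ordering may vary between runs.
    print(get_pairs(("h", "e", "l", "l", "o</w>")))
    # {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o</w>')}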
class CTRLTokenizer(PreTrainedTokenizer):
    """
    Construct a CTRL tokenizer: Byte-Pair-Encoding applied to whitespace-split tokens,
    with "@@" continuation markers between subword units.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # merge the highest-ranked (lowest index) bigram first
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]  # strip the trailing "</w>" marker
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string by splitting on whitespace and applying BPE to each token."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens back to a single string, removing '@@' continuation markers."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 87 |
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299792458

# Symbols
ct, x, y, z = symbols("ct x y z")
def beta(velocity: float) -> float:
    """Return v/c after validating that 1 <= v <= c."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c
def gamma(velocity: float) -> float:
    """Return the Lorentz factor 1 / sqrt(1 - (v/c)^2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)
def transformation_matrix(velocity: float) -> np.ndarray:
    """Return the 4x4 Lorentz boost matrix along the x-axis."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )
def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    """Apply the boost to a four-vector; defaults to the symbolic vector (ct, x, y, z)."""
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29979245)
print("""Example of four vector: """)
print(F'''ct\' = {four_vector[0]}''')
print(F'''x\' = {four_vector[1]}''')
print(F'''y\' = {four_vector[2]}''')
print(F'''z\' = {four_vector[3]}''')
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F'''\n{numerical_vector}''')
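
    # Sanity check (illustrative, not part of the original script): at v = c/2,
    # beta = 0.5 and gamma = 1 / sqrt(1 - 0.25) = 2 / sqrt(3) ~= 1.1547.
    half_c = c // 2
    print(f"beta  = {beta(half_c):.4f}")   # beta  = 0.5000
    print(f"gamma = {gamma(half_c):.4f}")  # gamma = 1.1547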
| 87 | 1 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def replace_key(key):
    """Map an original OpenAI Jukebox state-dict key to the transformers naming scheme."""
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    """Rename every key in `state_dict` to the transformers layout, recording the renames in `mapping`."""
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")

        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """Download the original OpenAI Jukebox checkpoints, rename the weights, and save a transformers model."""
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 87 |
def heaps(arr: list) -> list:
    """Return all permutations of `arr` using Heap's algorithm (recursive form)."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
print(heaps(arr))
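
    # Illustration (added): Heap's algorithm emits each permutation exactly
    # once; for a 3-element list that is 3! = 6 tuples. Output order depends
    # on the swap schedule, so compare as sorted lists.
    assert sorted(heaps([1, 2, 3])) == sorted(
        [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]
    )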
| 87 | 1 |
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
def main() -> None:
    """Close stale issues and post stale-bot reminders on huggingface/transformers."""
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 87 |
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
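

if __name__ == "__main__":
    # Illustrative check (not part of the pipeline): subtracting the row max in
    # softmax() is mathematically a no-op but prevents overflow. np.exp(1002.0)
    # alone overflows to inf, yet the shifted version matches the softmax of
    # [0.0, 1.0, 2.0].
    print(softmax(np.array([[1000.0, 1001.0, 1002.0]])))
    # [[0.09003057 0.24472847 0.66524096]]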
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        return_all_scores (`bool`, *optional*, defaults to `False`):
            Whether to return all prediction scores or just the one of the predicted class.
        function_to_apply (`str`, *optional*, defaults to `"default"`):
            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:

            - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
              has several labels, will apply the softmax function on the output.
            - `"sigmoid"`: Applies the sigmoid function on the output.
            - `"softmax"`: Applies the softmax function on the output.
            - `"none"`: Does not apply any function on the output.
    """,
)
class TextClassificationPipeline(Pipeline):
    """Text classification pipeline using any model with a sequence classification head."""

    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if you want similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
| 87 | 1 |
def get_data(source_data: list) -> list[list[float]]:
    """Convert rows of raw data into per-column lists of floats."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists
def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Normalize each column to [0, 1]: weight 0 favors small values, weight 1 favors large values."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)

        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)

        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)

        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)

        score_lists.append(score)
    return score_lists
def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column scores for each row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """
    weights - int list, possible values 0 / 1:
    0 if lower values have higher weight in the data set,
    1 if higher values have higher weight in the data set.
    Appends each row's total score to the row and returns the data.
    """
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
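

if __name__ == "__main__":
    # Worked example (added for illustration): three laptops as
    # [price, memory, year]; price should be low (weight 0), memory and year
    # high (weight 1). Row 1 wins on every weighted column, so it scores 2.0.
    laptops = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
    print(procentual_proximity(laptops, [0, 1, 1]))
    # [[20, 60, 2012, 1.5], [23, 90, 2015, 2.0], [22, 50, 2011, 0.33333333333333337]]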
| 87 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 87 | 1 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 87 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    """Configuration class to store the configuration of a MobileNetV1 model."""

    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 87 | 1 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("""|b1"""),
np.dtype("""|u1"""),
np.dtype("""<u2"""),
np.dtype(""">u2"""),
np.dtype("""<i2"""),
np.dtype(""">i2"""),
np.dtype("""<u4"""),
np.dtype(""">u4"""),
np.dtype("""<i4"""),
np.dtype(""">i4"""),
np.dtype("""<f4"""),
np.dtype(""">f4"""),
np.dtype("""<f8"""),
np.dtype(""">f8"""),
]
@dataclass
class Image:
    """Image feature: reads image files or bytes into `PIL.Image.Image` objects."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode example into a format for Arrow."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode example image file into image data."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array to the Image arrow storage type (a struct of bytes and path)."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed image files into the Arrow array, replacing paths with the file contents."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats( ) -> List[str]:
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
_IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes( image ) -> bytes:
"""simple docstring"""
buffer = BytesIO()
if image.format in list_image_compression_formats():
format = image.format
else:
format = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF'''
image.save(buffer , format=format )
return buffer.getvalue()
def encode_pil_image( image ) -> dict:
"""simple docstring"""
if hasattr(image , '''filename''' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(image )}
def encode_np_array( array ) -> dict:
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
dtype = array.dtype
dtype_byteorder = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER
dtype_kind = dtype.kind
dtype_itemsize = dtype.itemsize
dest_dtype = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
dest_dtype = np.dtype('''|u1''' )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" )
if dtype is not dest_dtype:
warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
dest_dtype = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize )
dest_dtype = np.dtype(dest_dtype_str )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" )
image = PIL.Image.fromarray(array.astype(dest_dtype ) )
return {"path": None, "bytes": image_to_bytes(image )}
def objects_to_list_of_image_dicts( objs ) -> List[dict]:
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if objs:
_, obj = first_non_null_value(objs )
if isinstance(obj , str ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(obj , np.ndarray ):
obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array )
return [obj_to_image_dict_func(obj ) for obj in objs]
elif isinstance(obj , PIL.Image.Image ):
obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image )
return [obj_to_image_dict_func(obj ) for obj in objs]
else:
return objs
else:
return objs
| 87 |
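For readers skimming the Image feature code above, here is a minimal, hedged sketch (assuming only that Pillow is installed) of the encode/decode round trip it implements: every sample is normalized to a {"bytes", "path"} struct, and decoding turns that struct back into a PIL image.

from io import BytesIO
import PIL.Image

def encode_image(image: "PIL.Image.Image") -> dict:
    # Prefer referencing the source file; otherwise serialize the pixels.
    if getattr(image, "filename", ""):
        return {"path": image.filename, "bytes": None}
    buffer = BytesIO()
    image.save(buffer, format=image.format or "PNG")
    return {"path": None, "bytes": buffer.getvalue()}

def decode_image(sample: dict) -> "PIL.Image.Image":
    if sample["bytes"] is not None:
        image = PIL.Image.open(BytesIO(sample["bytes"]))
    else:
        image = PIL.Image.open(sample["path"])
    image.load()  # read eagerly to avoid "Too many open files" errors
    return image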
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = SpeechaTextTokenizer
UpperCAmelCase__ = False
UpperCAmelCase__ = True
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]:
'''simple docstring'''
super().setUp()
spm_model = sp.SentencePieceProcessor()
spm_model.Load(SAMPLE_SP)
vocab = ['''<s>''', '''<pad>''', '''</s>''', '''<unk>''']
vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
vocab_tokens = dict(zip(vocab , range(len(vocab))))
save_dir = Path(self.tmpdirname)
save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES['''vocab_file'''])
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(SAMPLE_SP , save_dir / VOCAB_FILES_NAMES['''spm_file'''])
tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[int]:
'''simple docstring'''
A__ = '''<pad>'''
A__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__) , UpperCAmelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__) , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
'''simple docstring'''
vocab_keys = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<s>''')
self.assertEqual(vocab_keys[1] , '''<pad>''')
self.assertEqual(vocab_keys[-1] , '''j''')
self.assertEqual(len(UpperCAmelCase__) , 1_001)
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[str]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_001)
def SCREAMING_SNAKE_CASE ( self : int) ->List[str]:
'''simple docstring'''
tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
A__ = tokenizer.tokenize('''This is a test''')
self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase__) , [289, 50, 14, 174, 386] , )
A__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
UpperCAmelCase__ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
A__ = tokenizer.convert_tokens_to_ids(UpperCAmelCase__)
self.assertListEqual(UpperCAmelCase__ , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])
A__ = tokenizer.convert_ids_to_tokens(UpperCAmelCase__)
self.assertListEqual(
UpperCAmelCase__ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def SCREAMING_SNAKE_CASE ( self : int) ->List[Any]:
'''simple docstring'''
A__ = {'''input_ids''': [[3_791, 797, 31, 11, 64, 797, 31, 2_429, 433, 12, 1_176, 12, 20, 786, 915, 142, 2_413, 240, 37, 3_238, 797, 31, 11, 35, 93, 915, 142, 2_413, 240, 37, 5_540, 567, 1_276, 93, 37, 610, 40, 62, 455, 657, 1_042, 123, 780, 177, 37, 309, 241, 1_298, 514, 20, 292, 2_737, 114, 2_469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3_388, 511, 459, 4, 3_555, 40, 321, 302, 705, 4, 3_388, 511, 583, 326, 5, 5, 5, 62, 3_310, 560, 177, 2_680, 217, 1_508, 32, 31, 853, 418, 64, 583, 511, 1_605, 62, 35, 93, 560, 177, 2_680, 217, 1_508, 1_521, 64, 583, 511, 519, 62, 20, 1_515, 764, 20, 149, 261, 5_625, 7_972, 20, 5_540, 567, 1_276, 93, 3_925, 1_675, 11, 15, 802, 7_972, 576, 217, 1_508, 11, 35, 93, 1_253, 2_441, 15, 289, 652, 31, 416, 321, 3_842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2_681, 1_153, 3_434, 20, 5_540, 37, 567, 126, 1_253, 2_441, 3_376, 449, 210, 431, 1_563, 177, 767, 5_540, 11, 1_203, 472, 11, 2_953, 685, 285, 364, 706, 1_153, 20, 6_799, 20, 2_869, 20, 4_464, 126, 40, 2_429, 20, 1_040, 866, 2_664, 418, 20, 318, 20, 1_726, 186, 20, 265, 522, 35, 93, 2_191, 4_634, 20, 1_040, 12, 6_799, 15, 228, 2_356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_575, 2_666, 684, 1_582, 1_176, 12, 627, 149, 619, 20, 4_902, 563, 11, 20, 149, 261, 3_420, 2_356, 174, 142, 4_714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name='''facebook/s2t-small-mustc-en-de-st''' , revision='''a14f04cf0776c02f62a8cb800cf7909e15ea23ad''' , )
@require_sentencepiece
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
checkpoint_name = '''valhalla/s2t_mustc_multilinguial_medium'''
french_text = '''C\'est trop cool'''
spanish_text = '''Esto es genial'''
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Dict) ->Dict:
'''simple docstring'''
cls.tokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
return cls
def SCREAMING_SNAKE_CASE ( self : str) ->Dict:
'''simple docstring'''
self.assertEqual(self.tokenizer.lang_code_to_id['''pt'''] , 4)
self.assertEqual(self.tokenizer.lang_code_to_id['''ru'''] , 6)
self.assertEqual(self.tokenizer.lang_code_to_id['''it'''] , 9)
self.assertEqual(self.tokenizer.lang_code_to_id['''de'''] , 11)
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict:
'''simple docstring'''
self.assertEqual(self.tokenizer.vocab_size , 10_000)
def SCREAMING_SNAKE_CASE ( self : Dict) ->Union[str, Any]:
'''simple docstring'''
self.assertIn(ES_CODE , self.tokenizer.all_special_ids)
generated_ids = [ES_CODE, 4, 1_601, 47, 7_647, 2]
result = self.tokenizer.decode(generated_ids , skip_special_tokens=True)
expected_spanish = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True)
self.assertEqual(result , expected_spanish)
self.assertNotIn(self.tokenizer.eos_token , result)
def SCREAMING_SNAKE_CASE ( self : int) ->str:
'''simple docstring'''
self.tokenizer.tgt_lang = '''fr'''
encoded = self.tokenizer(self.french_text).input_ids
self.assertEqual(encoded[0] , FR_CODE)
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id)
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int:
'''simple docstring'''
self.tokenizer.tgt_lang = '''fr'''
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE])
self.tokenizer.tgt_lang = '''es'''
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE])
| 87 | 1 |
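As a quick, self-contained illustration of the vocab construction in the setUp above (hypothetical pieces stand in for spm_model.IdToPiece), the token-to-id mapping is purely positional, which is why the tests can assert that '<pad>' maps to id 1:

special_tokens = ["<s>", "<pad>", "</s>", "<unk>"]
pieces = ["▁This", "▁is", "▁a"]  # placeholder for spm_model.IdToPiece(i)
vocab_tokens = dict(zip(special_tokens + pieces, range(len(special_tokens) + len(pieces))))
assert vocab_tokens["<pad>"] == 1 and vocab_tokens["</s>"] == 2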
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = """sshleifer/bart-tiny-random"""
TINY_T5 = """patrickvonplaten/t5-tiny-random"""
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def teacher_config( self : List[Any]) ->str:
'''simple docstring'''
return AutoConfig.from_pretrained(TINY_BART)
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict:
'''simple docstring'''
student, *_ = create_student_by_copying_alternating_layers(TINY_T5 , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.num_hidden_layers , 1)
def SCREAMING_SNAKE_CASE ( self : int) ->Any:
'''simple docstring'''
student, *_ = create_student_by_copying_alternating_layers(TINY_T5 , tempfile.mkdtemp() , e=1 , d=None)
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]:
'''simple docstring'''
student, *_ = create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=1 , d=None)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers)
def SCREAMING_SNAKE_CASE ( self : str) ->Optional[int]:
'''simple docstring'''
student, *_ = create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , 1)
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[int]:
'''simple docstring'''
with self.assertRaises(AssertionError):
create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=None , d=None)
| 87 |
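A toy sketch of the "copy alternating layers" idea the tests above exercise (this is not the library's actual pick_layers_to_copy helper, just an illustration): a student of depth d takes every (teacher_depth / d)-th teacher layer.

def pick_layers(teacher_layers: int, student_layers: int) -> list[int]:
    # evenly spaced teacher layer indices to copy into the student
    step = teacher_layers / student_layers
    return [round(i * step) for i in range(student_layers)]

print(pick_layers(12, 3))  # [0, 4, 8]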
from __future__ import annotations
import requests
def get_hackernews_story( story_id ) -> dict:
"""simple docstring"""
url = f"""https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"""
return requests.get(url ).json()
def hackernews_top_stories( max_stories = 10 ) -> list[dict]:
"""simple docstring"""
url = '''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'''
story_ids = requests.get(url ).json()[:max_stories]
return [get_hackernews_story(story_id ) for story_id in story_ids]
def hackernews_top_stories_as_markdown( max_stories = 10 ) -> str:
"""simple docstring"""
stories = hackernews_top_stories(max_stories )
return "\n".join('''* [{title}]({url})'''.format(**story ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 87 | 1 |
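Example invocation of the Hacker News helpers above (requires network access):

# Print the top three stories as a Markdown bullet list.
print(hackernews_top_stories_as_markdown(3))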
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = MBartaaTokenizer
UpperCAmelCase__ = MBartaaTokenizerFast
UpperCAmelCase__ = True
UpperCAmelCase__ = True
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = MBartaaTokenizer(SAMPLE_VOCAB , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname)
def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]:
'''simple docstring'''
A__ = '''<s>'''
A__ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__) , UpperCAmelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__) , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str:
'''simple docstring'''
vocab_keys = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<s>''')
self.assertEqual(vocab_keys[1] , '''<pad>''')
self.assertEqual(vocab_keys[-1] , '''<mask>''')
self.assertEqual(len(UpperCAmelCase__) , 1_054)
def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_054)
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Any:
'''simple docstring'''
tokenizer = MBartaaTokenizer(SAMPLE_VOCAB , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=True)
A__ = tokenizer.tokenize('''This is a test''')
self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase__) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
A__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
UpperCAmelCase__ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
A__ = tokenizer.convert_tokens_to_ids(UpperCAmelCase__)
self.assertListEqual(
UpperCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
A__ = tokenizer.convert_ids_to_tokens(UpperCAmelCase__)
self.assertListEqual(
UpperCAmelCase__ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[int]:
'''simple docstring'''
A__ = {'''input_ids''': [[250_004, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [250_004, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250_004, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[str]:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
self.tokenizers_list[0] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs)
tmpdirname_2 = tempfile.mkdtemp()
tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname_2)
tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname_2)
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files))
tokenizer_r_files = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f)
self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files)
# Checks everything loads correctly in the same way
tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname_2)
tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname_2)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(tokenizer_pp , key))
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(tmpdirname_2)
# Save tokenizer rust, legacy_format=True
tmpdirname_2 = tempfile.mkdtemp()
tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname_2 , legacy_format=True)
tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname_2)
# Checks it save with the same files
self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files)
# Checks everything loads correctly in the same way
tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname_2)
tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname_2)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(tokenizer_pp , key))
shutil.rmtree(tmpdirname_2)
# Save tokenizer rust, legacy_format=False
tmpdirname_2 = tempfile.mkdtemp()
tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname_2 , legacy_format=False)
tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname_2)
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files))
# Checks everything loads correctly in the same way
tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname_2)
tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname_2)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(tokenizer_pp , key))
shutil.rmtree(tmpdirname_2)
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
checkpoint_name = '''facebook/mbart-large-50-one-to-many-mmt'''
src_text = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
tgt_text = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
expected_src_tokens = [EN_CODE, 8274, 12_7873, 2_5916, 7, 8622, 2071, 438, 6_7485, 53, 18_7895, 23, 5_1712, 2]
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Dict) ->Dict:
'''simple docstring'''
cls.tokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''')
cls.pad_token_id = 1
return cls
def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 250_001)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 250_004)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 250_020)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 250_038)
def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[str]:
'''simple docstring'''
A__ = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict:
'''simple docstring'''
self.assertIn(RO_CODE , self.tokenizer.all_special_ids)
generated_ids = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
result = self.tokenizer.decode(generated_ids , skip_special_tokens=True)
expected_romanian = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True)
self.assertEqual(result , expected_romanian)
self.assertNotIn(self.tokenizer.eos_token , result)
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]:
'''simple docstring'''
src_text = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , str)
desired_max_length = 10
ids = self.tokenizer(src_text , max_length=desired_max_length , truncation=True).input_ids[0]
self.assertEqual(ids[0] , EN_CODE)
self.assertEqual(ids[-1] , 2)
self.assertEqual(len(ids) , desired_max_length)
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR''']) , [250_053, 250_001])
def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
'''simple docstring'''
tmpdirname = tempfile.mkdtemp()
original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(tmpdirname)
new_tok = MBartaaTokenizer.from_pretrained(tmpdirname)
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens)
@require_torch
def SCREAMING_SNAKE_CASE ( self : str) ->str:
'''simple docstring'''
batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors='''pt''')
batch['''decoder_input_ids'''] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[Any]:
'''simple docstring'''
batch = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens) , return_tensors='''pt''' , )
batch['''decoder_input_ids'''] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id)
self.assertIsInstance(batch , BatchEncoding)
self.assertEqual((2, 14) , batch.input_ids.shape)
self.assertEqual((2, 14) , batch.attention_mask.shape)
result = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , result)
self.assertEqual(2 , batch.decoder_input_ids[0, 0]) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE])
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
def SCREAMING_SNAKE_CASE ( self : Tuple) ->str:
'''simple docstring'''
batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors='''pt''')
targets = self.tokenizer(
text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors='''pt''')
labels = targets['''input_ids''']
batch['''decoder_input_ids'''] = shift_tokens_right(labels , self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1] , 3)
self.assertEqual(batch.decoder_input_ids.shape[1] , 10)
@require_torch
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
'''simple docstring'''
A__ = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''')
self.assertEqual(
nested_simplify(UpperCAmelCase__) , {
# en_XX, A, test, EOS
'''input_ids''': [[250_004, 62, 3_034, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 250_001,
} , )
| 87 |
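A hedged, self-contained sketch of the shift_tokens_right contract the batch tests above rely on (this mirrors, but is not, the transformers implementation): the last non-pad token, the </s> that closes the labels, is wrapped to position 0 and everything else shifts right, so labels [ro_RO, tokens..., </s>] become decoder inputs [</s>, ro_RO, tokens...], matching the [2, RO_CODE] assertion.

import torch

def shift_tokens_right_sketch(labels: torch.Tensor, pad_token_id: int) -> torch.Tensor:
    shifted = labels.clone()
    eos_index = (labels.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    decoder_start = shifted.gather(1, eos_index)  # last non-pad token (</s>)
    shifted[:, 1:] = shifted[:, :-1].clone()      # shift everything right
    shifted[:, 0] = decoder_start.squeeze(1)      # wrap </s> to the front
    return shifted

labels = torch.tensor([[250_020, 884, 2, 1]])     # [ro_RO, token, </s>, <pad>]
print(shift_tokens_right_sketch(labels, pad_token_id=1))  # [[2, 250020, 884, 2]]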
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_lowerCamelCase : Optional[List[str]] = None
_lowerCamelCase : int = """<""" if sys.byteorder == """little""" else """>"""
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_lowerCamelCase : Union[str, Any] = [
np.dtype("""|b1"""),
np.dtype("""|u1"""),
np.dtype("""<u2"""),
np.dtype(""">u2"""),
np.dtype("""<i2"""),
np.dtype(""">i2"""),
np.dtype("""<u4"""),
np.dtype(""">u4"""),
np.dtype("""<i4"""),
np.dtype(""">i4"""),
np.dtype("""<f4"""),
np.dtype(""">f4"""),
np.dtype("""<f8"""),
np.dtype(""">f8"""),
]
@dataclass
class UpperCamelCase_ :
'''simple docstring'''
UpperCAmelCase__ = True
UpperCAmelCase__ = None
# Automatically constructed
UpperCAmelCase__ = "PIL.Image.Image"
UpperCAmelCase__ = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
UpperCAmelCase__ = field(default='''Image''' , init=UpperCAmelCase__ , repr=UpperCAmelCase__ )
def __call__( self : List[str]) ->List[str]:
'''simple docstring'''
return self.pa_type
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) ->dict:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''')
if isinstance(UpperCAmelCase__ , UpperCAmelCase__):
A__ = np.array(UpperCAmelCase__)
if isinstance(UpperCAmelCase__ , UpperCAmelCase__):
return {"path": value, "bytes": None}
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__):
return {"path": None, "bytes": value}
elif isinstance(UpperCAmelCase__ , np.ndarray):
# convert the image array to PNG/TIFF bytes
return encode_np_array(UpperCAmelCase__)
elif isinstance(UpperCAmelCase__ , PIL.Image.Image):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(UpperCAmelCase__)
elif value.get('''path''') is not None and os.path.isfile(value['''path''']):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('''path''')}
elif value.get('''bytes''') is not None or value.get('''path''') is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('''bytes'''), "path": value.get('''path''')}
else:
raise ValueError(
f"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""")
def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : dict , UpperCAmelCase__ : str=None) ->"PIL.Image.Image":
'''simple docstring'''
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''')
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support decoding images, please install \'Pillow\'.''')
if token_per_repo_id is None:
A__ = {}
A__ , A__ = value['''path'''], value['''bytes''']
if bytes_ is None:
if path is None:
raise ValueError(f"""An image should have one of 'path' or 'bytes' but both are None in {value}.""")
else:
if is_local_path(UpperCAmelCase__):
A__ = PIL.Image.open(UpperCAmelCase__)
else:
A__ = path.split('''::''')[-1]
try:
A__ = string_to_dict(UpperCAmelCase__ , config.HUB_DATASETS_URL)['''repo_id''']
A__ = token_per_repo_id.get(UpperCAmelCase__)
except ValueError:
A__ = None
with xopen(UpperCAmelCase__ , '''rb''' , use_auth_token=UpperCAmelCase__) as f:
A__ = BytesIO(f.read())
A__ = PIL.Image.open(bytes_)
else:
A__ = PIL.Image.open(BytesIO(bytes_))
image.load() # to avoid "Too many open files" errors
return image
def SCREAMING_SNAKE_CASE ( self : Dict) ->Union["FeatureType", Dict[str, "FeatureType"]]:
'''simple docstring'''
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('''binary'''),
"path": Value('''string'''),
}
)
def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Union[pa.StringArray, pa.StructArray, pa.ListArray]) ->pa.StructArray:
'''simple docstring'''
if pa.types.is_string(storage.type):
A__ = pa.array([None] * len(UpperCAmelCase__) , type=pa.binary())
A__ = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null())
elif pa.types.is_binary(storage.type):
A__ = pa.array([None] * len(UpperCAmelCase__) , type=pa.string())
A__ = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null())
elif pa.types.is_struct(storage.type):
if storage.type.get_field_index('''bytes''') >= 0:
A__ = storage.field('''bytes''')
else:
A__ = pa.array([None] * len(UpperCAmelCase__) , type=pa.binary())
if storage.type.get_field_index('''path''') >= 0:
A__ = storage.field('''path''')
else:
A__ = pa.array([None] * len(UpperCAmelCase__) , type=pa.string())
A__ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null())
elif pa.types.is_list(storage.type):
A__ = pa.array(
[encode_np_array(np.array(UpperCAmelCase__))['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
A__ = pa.array([None] * len(UpperCAmelCase__) , type=pa.string())
A__ = pa.StructArray.from_arrays(
[bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null())
return array_cast(UpperCAmelCase__ , self.pa_type)
def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : pa.StructArray) ->pa.StructArray:
'''simple docstring'''
@no_op_if_value_is_null
def path_to_bytes(UpperCAmelCase__ : Dict):
with xopen(UpperCAmelCase__ , '''rb''') as f:
A__ = f.read()
return bytes_
A__ = pa.array(
[
(path_to_bytes(x['''path''']) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
A__ = pa.array(
[os.path.basename(UpperCAmelCase__) if path is not None else None for path in storage.field('''path''').to_pylist()] , type=pa.string() , )
A__ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null())
return array_cast(UpperCAmelCase__ , self.pa_type)
def SCREAMING_SNAKE_CASE ( ) -> List[str]:
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
A__ = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> bytes:
"""simple docstring"""
A__ = BytesIO()
if image.format in list_image_compression_formats():
A__ = image.format
else:
A__ = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF'''
image.save(lowercase_ , format=lowercase_ )
return buffer.getvalue()
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> dict:
"""simple docstring"""
if hasattr(lowercase_ , '''filename''' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(lowercase_ )}
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> dict:
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
A__ = array.dtype
A__ = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER
A__ = dtype.kind
A__ = dtype.itemsize
A__ = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
A__ = np.dtype('''|u1''' )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" )
if dtype is not dest_dtype:
warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
A__ = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
A__ = dtype_byteorder + dtype_kind + str(lowercase_ )
A__ = np.dtype(lowercase_ )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" )
A__ = PIL.Image.fromarray(array.astype(lowercase_ ) )
return {"path": None, "bytes": image_to_bytes(lowercase_ )}
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[dict]:
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if objs:
A__ , A__ = first_non_null_value(lowercase_ )
if isinstance(lowercase_ , lowercase_ ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(lowercase_ , np.ndarray ):
A__ = no_op_if_value_is_null(lowercase_ )
return [obj_to_image_dict_func(lowercase_ ) for obj in objs]
elif isinstance(lowercase_ , PIL.Image.Image ):
A__ = no_op_if_value_is_null(lowercase_ )
return [obj_to_image_dict_func(lowercase_ ) for obj in objs]
else:
return objs
else:
return objs
| 87 | 1 |
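To make the dtype-downcast loop in the image-encoding code above concrete, here is a small standalone illustration. It is simplified on purpose: it assumes a little-endian host and uses a reduced set of valid dtypes. The idea is to halve the itemsize within the same kind until a Pillow-compatible dtype is found.

import numpy as np

VALID = {np.dtype(s) for s in ("|u1", "<u2", "<i2", "<u4", "<i4", "<f4", "<f8")}

def downcast_to_image_dtype(dtype: np.dtype) -> np.dtype:
    itemsize = dtype.itemsize
    while itemsize >= 1:
        candidate = np.dtype("<" + dtype.kind + str(itemsize))
        if candidate in VALID:
            return candidate
        itemsize //= 2
    raise TypeError(f"no valid image dtype for {dtype}")

print(downcast_to_image_dtype(np.dtype("int64")))  # int32, after one halving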
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowerCamelCase : Optional[int] = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
_lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 87 |
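The BLIP __init__ above builds an _import_structure dict and hands it to _LazyModule. A stripped-down sketch of that pattern (not the real transformers class) looks like this: attribute access triggers the submodule import, and the resolved value is cached so the import cost is paid at most once.

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {s: mod for mod, syms in import_structure.items() for s in syms}

    def __getattr__(self, symbol: str):
        if symbol not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {symbol!r}")
        module = importlib.import_module("." + self._symbol_to_module[symbol], self.__name__)
        value = getattr(module, symbol)
        setattr(self, symbol, value)  # cache so __getattr__ is hit only once
        return value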
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ = (
{
'''feature-extraction''': TFMobileBertModel,
'''fill-mask''': TFMobileBertForMaskedLM,
'''question-answering''': TFMobileBertForQuestionAnswering,
'''text-classification''': TFMobileBertForSequenceClassification,
'''token-classification''': TFMobileBertForTokenClassification,
'''zero-shot''': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , inputs_dict : Dict , model_class : Tuple , return_labels : str=False) ->Optional[Any]:
'''simple docstring'''
inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels)
if return_labels:
if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
inputs_dict['''next_sentence_label'''] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32)
return inputs_dict
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int=13 , UpperCAmelCase__ : str=7 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : str=99 , UpperCAmelCase__ : List[str]=32 , UpperCAmelCase__ : Optional[int]=32 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : List[str]=4 , UpperCAmelCase__ : Optional[Any]=37 , UpperCAmelCase__ : Optional[int]="gelu" , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : List[Any]=512 , UpperCAmelCase__ : Tuple=16 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Dict=0.02 , UpperCAmelCase__ : int=3 , UpperCAmelCase__ : List[str]=4 , UpperCAmelCase__ : Tuple=None , ) ->Any:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_labels
A__ = num_choices
A__ = scope
A__ = embedding_size
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Tuple:
'''simple docstring'''
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length])
A__ = None
if self.use_token_type_ids:
A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
A__ = ids_tensor([self.batch_size] , self.num_choices)
A__ = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any]) ->Any:
'''simple docstring'''
A__ = TFMobileBertModel(config=UpperCAmelCase__)
A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A__ = model(UpperCAmelCase__)
A__ = [input_ids, input_mask]
A__ = model(UpperCAmelCase__)
A__ = model(UpperCAmelCase__)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple) ->Optional[Any]:
'''simple docstring'''
A__ = TFMobileBertForMaskedLM(config=UpperCAmelCase__)
A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A__ = model(UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any]) ->int:
'''simple docstring'''
A__ = TFMobileBertForNextSentencePrediction(config=UpperCAmelCase__)
A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A__ = model(UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int) ->List[Any]:
'''simple docstring'''
A__ = TFMobileBertForPreTraining(config=UpperCAmelCase__)
A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A__ = model(UpperCAmelCase__)
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple) ->Dict:
'''simple docstring'''
A__ = self.num_labels
A__ = TFMobileBertForSequenceClassification(config=UpperCAmelCase__)
A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A__ = model(UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int) ->Dict:
'''simple docstring'''
A__ = self.num_choices
A__ = TFMobileBertForMultipleChoice(config=UpperCAmelCase__)
A__ = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1) , (1, self.num_choices, 1))
A__ = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1) , (1, self.num_choices, 1))
A__ = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1) , (1, self.num_choices, 1))
A__ = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
A__ = model(UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int]) ->int:
'''simple docstring'''
A__ = self.num_labels
A__ = TFMobileBertForTokenClassification(config=UpperCAmelCase__)
A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A__ = model(UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
A__ = TFMobileBertForQuestionAnswering(config=UpperCAmelCase__)
A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A__ = model(UpperCAmelCase__)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE ( self : Any) ->str:
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]:
'''simple docstring'''
self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
self.config_tester = ConfigTester(self , config_class=MobileBertConfig , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Dict:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[str]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*UpperCAmelCase__)
@slow
def SCREAMING_SNAKE_CASE ( self : str) ->List[Any]:
'''simple docstring'''
for model_name in ["google/mobilebert-uncased"]:
model = TFMobileBertModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_tf
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Any:
'''simple docstring'''
model = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''')
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
output = model(input_ids)[0]
expected_shape = [1, 6, 30_522]
self.assertEqual(output.shape , expected_shape)
expected_slice = tf.constant(
[
[
[-4.5919547, -9.248295, -9.645256],
[-6.7306175, -6.440284, -6.6052837],
[-7.2743506, -6.7847915, -6.024673],
]
])
tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4)
| 87 | 1 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm( main_process_only = True , *args , **kwargs ):
"""simple docstring"""
if not is_tqdm_available():
raise ImportError('''Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.''' )
disable = False
if main_process_only:
# only the local main process should render the progress bar
disable = PartialState().local_process_index != 0
return _tqdm(*args , **kwargs , disable=disable )
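# Usage sketch (hedged: `dataloader` is an illustrative name, not defined here):
#
#     for batch in tqdm(True, dataloader, desc="training"):
#         ...
#
# Only the rank with local_process_index == 0 renders the bar; every other
# process passes disable=True straight through to tqdm.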
| 87 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester( unittest.TestCase ):
'''simple docstring'''
def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=224 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
size = size if size is not None else {'''height''': 18, '''width''': 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
def prepare_image_processor_dict( self):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class EfficientFormerImageProcessorTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
image_processing_class = ViTImageProcessor if is_vision_available() else None
def setUp( self):
'''simple docstring'''
self.image_proc_tester = EfficientFormerImageProcessorTester(self)
@property
def image_processor_dict( self):
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def test_image_proc_properties( self):
'''simple docstring'''
image_processor = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processor , '''image_mean'''))
self.assertTrue(hasattr(image_processor , '''image_std'''))
self.assertTrue(hasattr(image_processor , '''do_normalize'''))
self.assertTrue(hasattr(image_processor , '''do_resize'''))
self.assertTrue(hasattr(image_processor , '''size'''))
def test_batch_feature( self):
'''simple docstring'''
pass
def test_call_pil( self):
'''simple docstring'''
image_processor = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False)
for image in image_inputs:
self.assertIsInstance(image , Image.Image)
# Test not batched input
encoded_images = image_processor(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
encoded_images = image_processor(image_inputs , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
def test_call_numpy( self):
'''simple docstring'''
image_processor = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False , numpify=True)
for image in image_inputs:
self.assertIsInstance(image , np.ndarray)
# Test not batched input
encoded_images = image_processor(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
encoded_images = image_processor(image_inputs , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
def test_call_pytorch( self):
'''simple docstring'''
image_processor = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False , torchify=True)
for image in image_inputs:
self.assertIsInstance(image , torch.Tensor)
# Test not batched input
encoded_images = image_processor(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
encoded_images = image_processor(image_inputs , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
| 87 | 1 |
import numpy as np
def exponential_linear_unit(vector , alpha ) -> np.ndarray:
"""simple docstring"""
return np.where(vector > 0 , vector , (alpha * (np.exp(vector ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod()
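# Quick illustrative check (input values chosen here, not from the original file):
# positives pass through unchanged, negatives decay smoothly toward -alpha.
print(exponential_linear_unit(np.array([-2.0, 0.0, 3.0]) , 0.3))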
| 87 |
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
EQUATORIAL_RADIUS = 6378137
def lamberts_ellipsoidal_distance(lat1 , lon1 , lat2 , lon2 ) -> float:
"""simple docstring"""
flattening = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
b_lat1 = atan((1 - flattening) * tan(radians(lat1 ) ) )
b_lat2 = atan((1 - flattening) * tan(radians(lat2 ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
sigma = haversine_distance(lat1 , lon1 , lat2 , lon2 ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
p_value = (b_lat1 + b_lat2) / 2
q_value = (b_lat2 - b_lat1) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
x_numerator = (sin(p_value ) ** 2) * (cos(q_value ) ** 2)
x_denominator = cos(sigma / 2 ) ** 2
x_value = (sigma - sin(sigma )) * (x_numerator / x_denominator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
y_numerator = (cos(p_value ) ** 2) * (sin(q_value ) ** 2)
y_denominator = sin(sigma / 2 ) ** 2
y_value = (sigma + sin(sigma )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
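# Usage sketch (coordinates are illustrative): distance in metres between a
# San Francisco point and a New York point on the WGS-84 ellipsoid.
print(lamberts_ellipsoidal_distance(37.774856, -122.424227, 40.713019, -74.012647))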
| 87 | 1 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = """https://storage.googleapis.com/cvdf-datasets/mnist/"""
def _read32(bytestream ):
"""simple docstring"""
# MNIST headers are big-endian 32-bit unsigned integers
dt = numpy.dtype(numpy.uint32 ).newbyteorder('''>''' )
return numpy.frombuffer(bytestream.read(4 ) , dtype=dt )[0]
@deprecated(None , '''Please use tf.data to implement this functionality.''' )
def _extract_images(f ):
"""simple docstring"""
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=f ) as bytestream:
magic = _read32(bytestream )
if magic != 2_051:
raise ValueError(
'''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) )
num_images = _read32(bytestream )
rows = _read32(bytestream )
cols = _read32(bytestream )
buf = bytestream.read(rows * cols * num_images )
data = numpy.frombuffer(buf , dtype=numpy.uint8 )
data = data.reshape(num_images , rows , cols , 1 )
return data
@deprecated(None , '''Please use tf.one_hot on tensors.''' )
def _dense_to_one_hot(labels_dense , num_classes ):
"""simple docstring"""
num_labels = labels_dense.shape[0]
index_offset = numpy.arange(num_labels ) * num_classes
labels_one_hot = numpy.zeros((num_labels, num_classes) )
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
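# Worked example (added for illustration): _dense_to_one_hot(numpy.array([1, 0]), 3)
# returns [[0., 1., 0.], [1., 0., 0.]] -- row i carries a single 1 in column labels[i].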
@deprecated(None , '''Please use tf.data to implement this functionality.''' )
def _extract_labels(f , one_hot=False , num_classes=10 ):
"""simple docstring"""
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=f ) as bytestream:
magic = _read32(bytestream )
if magic != 2_049:
raise ValueError(
'''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) )
num_items = _read32(bytestream )
buf = bytestream.read(num_items )
labels = numpy.frombuffer(buf , dtype=numpy.uint8 )
if one_hot:
return _dense_to_one_hot(labels , num_classes )
return labels
class _DataSet :
'''simple docstring'''
@deprecated(
None , '''Please use alternatives such as official/mnist/_DataSet.py'''
''' from tensorflow/models.''' , )
def __init__( self , images , labels , fake_data=False , one_hot=False , dtype=dtypes.float32 , reshape=True , seed=None , ):
'''simple docstring'''
seed1 , seed2 = random_seed.get_seed(seed)
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seed1 if seed is None else seed2)
dtype = dtypes.as_dtype(dtype).base_dtype
if dtype not in (dtypes.uint8, dtypes.float32):
raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype)
if fake_data:
self._num_examples = 10_000
self.one_hot = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f"""images.shape: {images.shape} labels.shape: {labels.shape}"""
self._num_examples = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
images = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2])
if dtype == dtypes.float32:
# Convert from [0, 255] -> [0.0, 1.0].
images = images.astype(numpy.float32)
images = numpy.multiply(images , 1.0 / 255.0)
self._images = images
self._labels = labels
self._epochs_completed = 0
self._index_in_epoch = 0
@property
def images( self):
'''simple docstring'''
return self._images
@property
def labels( self):
'''simple docstring'''
return self._labels
@property
def num_examples( self):
'''simple docstring'''
return self._num_examples
@property
def epochs_completed( self):
'''simple docstring'''
return self._epochs_completed
def next_batch( self , batch_size , fake_data=False , shuffle=True):
'''simple docstring'''
if fake_data:
fake_image = [1] * 784
fake_label = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(batch_size)],
[fake_label for _ in range(batch_size)],
)
start = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
perm0 = numpy.arange(self._num_examples)
numpy.random.shuffle(perm0)
self._images = self.images[perm0]
self._labels = self.labels[perm0]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
rest_num_examples = self._num_examples - start
images_rest_part = self._images[start : self._num_examples]
labels_rest_part = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
perm = numpy.arange(self._num_examples)
numpy.random.shuffle(perm)
self._images = self.images[perm]
self._labels = self.labels[perm]
# Start next epoch
start = 0
self._index_in_epoch = batch_size - rest_num_examples
end = self._index_in_epoch
images_new_part = self._images[start:end]
labels_new_part = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0),
)
else:
self._index_in_epoch += batch_size
end = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(None , '''Please write your own downloading logic.''' )
def _maybe_download(filename , work_directory , source_url ):
"""simple docstring"""
if not gfile.Exists(work_directory ):
gfile.MakeDirs(work_directory )
filepath = os.path.join(work_directory , filename )
if not gfile.Exists(filepath ):
urllib.request.urlretrieve(source_url , filepath ) # noqa: S310
with gfile.GFile(filepath ) as f:
size = f.size()
print('''Successfully downloaded''' , filename , size , '''bytes.''' )
return filepath
@deprecated(
None , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' )
def read_data_sets(train_dir , fake_data=False , one_hot=False , dtype=dtypes.float32 , reshape=True , validation_size=5_000 , seed=None , source_url=DEFAULT_SOURCE_URL , ):
"""simple docstring"""
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=True , one_hot=one_hot , dtype=dtype , seed=seed )
train = fake()
validation = fake()
test = fake()
return _Datasets(train=train , validation=validation , test=test )
if not source_url: # empty string check
source_url = DEFAULT_SOURCE_URL
train_images_file = '''train-images-idx3-ubyte.gz'''
train_labels_file = '''train-labels-idx1-ubyte.gz'''
test_images_file = '''t10k-images-idx3-ubyte.gz'''
test_labels_file = '''t10k-labels-idx1-ubyte.gz'''
local_file = _maybe_download(
train_images_file , train_dir , source_url + train_images_file )
with gfile.Open(local_file , '''rb''' ) as f:
train_images = _extract_images(f )
local_file = _maybe_download(
train_labels_file , train_dir , source_url + train_labels_file )
with gfile.Open(local_file , '''rb''' ) as f:
train_labels = _extract_labels(f , one_hot=one_hot )
local_file = _maybe_download(
test_images_file , train_dir , source_url + test_images_file )
with gfile.Open(local_file , '''rb''' ) as f:
test_images = _extract_images(f )
local_file = _maybe_download(
test_labels_file , train_dir , source_url + test_labels_file )
with gfile.Open(local_file , '''rb''' ) as f:
test_labels = _extract_labels(f , one_hot=one_hot )
if not 0 <= validation_size <= len(train_images ):
msg = (
'''Validation size should be between 0 and '''
f"""{len(train_images )}. Received: {validation_size}."""
)
raise ValueError(msg )
validation_images = train_images[:validation_size]
validation_labels = train_labels[:validation_size]
train_images = train_images[validation_size:]
train_labels = train_labels[validation_size:]
options = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed}
train = _DataSet(train_images , train_labels , **options )
validation = _DataSet(validation_images , validation_labels , **options )
test = _DataSet(test_images , test_labels , **options )
return _Datasets(train=train , validation=validation , test=test )
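# Minimal usage sketch (the download directory is illustrative):
# data = read_data_sets('''/tmp/mnist_data''' , one_hot=True)
# batch_images, batch_labels = data.train.next_batch(100)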
| 87 |
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue :
'''simple docstring'''
def __init__( self):
'''simple docstring'''
self.elements = []
self.set = set()
def minkey( self):
'''simple docstring'''
if not self.empty():
return self.elements[0][0]
else:
return float('''inf''')
def empty( self):
'''simple docstring'''
return len(self.elements) == 0
def put( self , item , priority):
'''simple docstring'''
if item not in self.set:
heapq.heappush(self.elements , (priority, item))
self.set.add(item)
else:
# update
# print("update", item)
temp = []
(pri, x) = heapq.heappop(self.elements)
while x != item:
temp.append((pri, x))
(pri, x) = heapq.heappop(self.elements)
temp.append((priority, item))
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx))
def remove_element( self , item):
'''simple docstring'''
if item in self.set:
self.set.remove(item)
temp = []
(pro, x) = heapq.heappop(self.elements)
while x != item:
temp.append((pro, x))
(pro, x) = heapq.heappop(self.elements)
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy))
def top_show( self):
'''simple docstring'''
return self.elements[0][1]
def get( self):
'''simple docstring'''
(priority, item) = heapq.heappop(self.elements)
self.set.remove(item)
return (priority, item)
def consistent_heuristic(P , goal ):
"""simple docstring"""
# euclidean distance
a = np.array(P )
b = np.array(goal )
return np.linalg.norm(a - b )
def heuristic_2(P , goal ):
"""simple docstring"""
# integer division by time variable
return consistent_heuristic(P , goal ) // t
def heuristic_1(P , goal ):
"""simple docstring"""
# manhattan distance
return abs(P[0] - goal[0] ) + abs(P[1] - goal[1] )
def key(start , i , goal , g_function ):
"""simple docstring"""
ans = g_function[start] + W1 * heuristics[i](start , goal )
return ans
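# Added note on the scheme: open_list[0] is the "anchor" queue ordered by the
# consistent heuristic; the remaining queues use the inadmissible heuristics and
# are only expanded while their best key stays within W2 times the anchor's best.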
def do_something(back_pointer , goal , start ):
"""simple docstring"""
grid = np.chararray((n, n) )
for i in range(n ):
for j in range(n ):
grid[i][j] = '''*'''
for i in range(n ):
for j in range(n ):
if (j, (n - 1) - i) in blocks:
grid[i][j] = '''#'''
grid[0][(n - 1)] = '''-'''
x = back_pointer[goal]
while x != start:
(x_c, y_c) = x
# print(x)
grid[(n - 1) - y_c][x_c] = '''-'''
x = back_pointer[x]
grid[(n - 1)][0] = '''-'''
for i in range(n ):
for j in range(n ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=''' ''' )
print('''<-- End position''' , end=''' ''' )
else:
print(grid[i][j] , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
x = back_pointer[goal]
while x != start:
print(x , end=''' ''' )
x = back_pointer[x]
print(x )
sys.exit()
def valid(p ):
"""simple docstring"""
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def expand_state(s , j , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , ):
"""simple docstring"""
for itera in range(n_heuristic ):
open_list[itera].remove_element(s )
# print("s", s)
# print("j", j)
(x, y) = s
left = (x - 1, y)
right = (x + 1, y)
up = (x, y + 1)
down = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(neighbours ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(neighbours )
back_pointer[neighbours] = -1
g_function[neighbours] = float('''inf''' )
if valid(neighbours ) and g_function[neighbours] > g_function[s] + 1:
g_function[neighbours] = g_function[s] + 1
back_pointer[neighbours] = s
if neighbours not in close_list_anchor:
open_list[0].put(neighbours , key(neighbours , 0 , goal , g_function ) )
if neighbours not in close_list_inad:
for var in range(1 , n_heuristic ):
if key(neighbours , var , goal , g_function ) <= W2 * key(
neighbours , 0 , goal , g_function ):
open_list[j].put(
neighbours , key(neighbours , var , goal , g_function ) )
def make_common_ground():
"""simple docstring"""
some_list = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()
blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3 # one consistent and two other inconsistent
# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)
t = 1
def multi_a_star(start , goal , n_heuristic ):
"""simple docstring"""
g_function = {start: 0, goal: float('''inf''' )}
back_pointer = {start: -1, goal: -1}
open_list = []
visited = set()
for i in range(n_heuristic ):
open_list.append(PriorityQueue() )
open_list[i].put(start , key(start , i , goal , g_function ) )
close_list_anchor = []
close_list_inad = []
while open_list[0].minkey() < float('''inf''' ):
for i in range(1 , n_heuristic ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= W2 * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('''inf''' ):
do_something(back_pointer , goal , start )
else:
get_s = open_list[i].top_show()
visited.add(get_s )
expand_state(
get_s , i , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , )
close_list_inad.append(get_s )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('''inf''' ):
do_something(back_pointer , goal , start )
else:
get_s = open_list[0].top_show()
visited.add(get_s )
expand_state(
get_s , 0 , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , )
close_list_anchor.append(get_s )
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(n ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 87 | 1 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester :
'''simple docstring'''
def __init__( self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , num_labels=10 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , scope=None , ):
'''simple docstring'''
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.num_channels = num_channels
self.num_stages = num_stages
self.hidden_sizes = hidden_sizes
self.depths = depths
self.is_training = is_training
self.use_labels = use_labels
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.num_labels = num_labels
self.initializer_range = initializer_range
self.out_features = out_features
self.out_indices = out_indices
self.scope = scope
def prepare_config_and_inputs( self):
'''simple docstring'''
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size] , self.num_labels)
config = self.get_config()
return config, pixel_values, labels
def get_config( self):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=False , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def create_and_check_model( self , config , pixel_values , labels):
'''simple docstring'''
model = ConvNextModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def create_and_check_for_image_classification( self , config , pixel_values , labels):
'''simple docstring'''
model = ConvNextForImageClassification(config)
model.to(torch_device)
model.eval()
result = model(pixel_values , labels=labels)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def create_and_check_backbone( self , config , pixel_values , labels):
'''simple docstring'''
model = ConvNextBackbone(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# verify hidden states
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
# verify backbone works with out_features=None
config.out_features = None
model = ConvNextBackbone(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
# verify channels
self.parent.assertEqual(len(model.channels) , 1)
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
def prepare_config_and_inputs_for_common( self):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values , labels = config_and_inputs
inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class ConvNextModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
all_model_classes = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
if is_torch_available()
else {}
)
fx_compatible = True
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
has_attentions = False
def setUp( self):
'''simple docstring'''
self.model_tester = ConvNextModelTester(self)
self.config_tester = ConfigTester(self , config_class=ConvNextConfig , has_text_modality=False , hidden_size=37)
def test_config( self):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def create_and_test_config_common_properties( self):
'''simple docstring'''
return
@unittest.skip(reason='''ConvNext does not use inputs_embeds''')
def test_inputs_embeds( self):
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNext does not support input and output embeddings''')
def test_model_common_attributes( self):
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNext does not use feedforward chunking''')
def test_feed_forward_chunking( self):
'''simple docstring'''
pass
def test_forward_signature( self):
'''simple docstring'''
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , expected_arg_names)
def test_model( self):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_backbone( self):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*config_and_inputs)
def test_hidden_states_output( self):
'''simple docstring'''
def check_hidden_states_output(inputs_dict , config , model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict , model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_stages = self.model_tester.num_stages
self.assertEqual(len(hidden_states) , expected_num_stages + 1)
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict['''output_hidden_states'''] = True
check_hidden_states_output(inputs_dict , config , model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict , config , model_class)
def test_for_image_classification( self):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def test_model_from_pretrained( self):
'''simple docstring'''
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = ConvNextModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def prepare_img():
"""simple docstring"""
image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@cached_property
def default_image_processor( self):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''') if is_vision_available() else None
@slow
def test_inference_image_classification_head( self):
'''simple docstring'''
model = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''').to(torch_device)
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image , return_tensors='''pt''').to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size((1, 1_000))
self.assertEqual(outputs.logits.shape , expected_shape)
expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4))
@require_torch
class ConvNextBackboneTest( unittest.TestCase , BackboneTesterMixin ):
'''simple docstring'''
all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
config_class = ConvNextConfig
has_attentions = False
def setUp( self):
'''simple docstring'''
self.model_tester = ConvNextModelTester(self)
| 87 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine"""
def get_user_input():
"""simple docstring"""
compute_environment = _ask_options(
'''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
config = get_sagemaker_input()
else:
config = get_cluster_input()
return config
def config_command_parser(subparsers=None ):
"""simple docstring"""
if subparsers is not None:
parser = subparsers.add_parser('''config''' , description=description )
else:
parser = argparse.ArgumentParser('''Accelerate config command''' , description=description )
parser.add_argument(
'''--config_file''' , default=None , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=config_command )
return parser
def config_command(args ):
"""simple docstring"""
config = get_user_input()
if args.config_file is not None:
config_file = args.config_file
else:
if not os.path.isdir(cache_dir ):
os.makedirs(cache_dir )
config_file = default_yaml_config_file
if config_file.endswith('''.json''' ):
config.to_json_file(config_file )
else:
config.to_yaml_file(config_file )
print(f"""accelerate configuration saved at {config_file}""" )
def main():
"""simple docstring"""
parser = config_command_parser()
args = parser.parse_args()
config_command(args )
if __name__ == "__main__":
main()
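# Typical invocation (through the installed `accelerate` entry point; the path is
# illustrative -- by default the file lands in the Hugging Face cache directory):
# accelerate config --config_file /path/to/default_config.yaml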
| 87 | 1 |
def heaps(arr ) -> list:
"""simple docstring"""
if len(arr ) <= 1:
return [tuple(arr )]
res = []
def generate(k , arr ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , arr )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
arr[i] , arr[k - 1] = arr[k - 1], arr[i]
else: # k is odd
arr[0] , arr[k - 1] = arr[k - 1], arr[0]
generate(k - 1 , arr )
generate(len(arr ) , arr )
return res
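# Worked example (easy to verify by hand): heaps([1, 2, 3]) visits the six
# permutations in Heap's order:
# [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]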
if __name__ == "__main__":
user_input = input("""Enter numbers separated by a comma:\n""").strip()
arr = [int(item) for item in user_input.split(""",""")]
print(heaps(arr))
| 87 |
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("""transformers.models.speecht5""")
def load_weights(checkpoint , hf_model , config ):
"""simple docstring"""
hf_model.apply_weight_norm()
hf_model.conv_pre.weight_g.data = checkpoint['''input_conv.weight_g''']
hf_model.conv_pre.weight_v.data = checkpoint['''input_conv.weight_v''']
hf_model.conv_pre.bias.data = checkpoint['''input_conv.bias''']
for i in range(len(config.upsample_rates ) ):
hf_model.upsampler[i].weight_g.data = checkpoint[f"""upsamples.{i}.1.weight_g"""]
hf_model.upsampler[i].weight_v.data = checkpoint[f"""upsamples.{i}.1.weight_v"""]
hf_model.upsampler[i].bias.data = checkpoint[f"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""]
hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""]
hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""]
hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""]
hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""]
hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""]
hf_model.conv_post.weight_g.data = checkpoint['''output_conv.1.weight_g''']
hf_model.conv_post.weight_v.data = checkpoint['''output_conv.1.weight_v''']
hf_model.conv_post.bias.data = checkpoint['''output_conv.1.bias''']
hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path , stats_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ):
"""simple docstring"""
if config_path is not None:
config = SpeechT5HifiGanConfig.from_pretrained(config_path )
else:
config = SpeechT5HifiGanConfig()
model = SpeechT5HifiGan(config )
orig_checkpoint = torch.load(checkpoint_path )
load_weights(orig_checkpoint['''model''']['''generator'''] , model , config )
stats = np.load(stats_path )
mean = stats[0].reshape(-1 )
scale = stats[1].reshape(-1 )
model.mean = torch.from_numpy(mean ).float()
model.scale = torch.from_numpy(scale ).float()
model.save_pretrained(pytorch_dump_folder_path )
if repo_id:
print('''Pushing to the hub...''' )
model.push_to_hub(repo_id )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
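# Example command line (the script file name is illustrative, not from the original):
# python convert_hifigan.py --checkpoint_path generator.ckpt --stats_path stats.npy \
#     --pytorch_dump_folder_path ./speecht5_hifigan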
| 87 | 1 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def rename_key(state_dict , old , new ):
"""simple docstring"""
val = state_dict.pop(old )
state_dict[new] = val
def rename_backbone_keys(state_dict ):
"""simple docstring"""
new_state_dict = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
new_key = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
new_state_dict[new_key] = value
else:
new_state_dict[key] = value
return new_state_dict
def read_in_q_k_v(state_dict ):
"""simple docstring"""
prefix = ''''''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
in_proj_weight = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
in_proj_bias = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
state_dict[f"""encoder.layers.{i}.self_attn.q_proj.weight"""] = in_proj_weight[:256, :]
state_dict[f"""encoder.layers.{i}.self_attn.q_proj.bias"""] = in_proj_bias[:256]
state_dict[f"""encoder.layers.{i}.self_attn.k_proj.weight"""] = in_proj_weight[256:512, :]
state_dict[f"""encoder.layers.{i}.self_attn.k_proj.bias"""] = in_proj_bias[256:512]
state_dict[f"""encoder.layers.{i}.self_attn.v_proj.weight"""] = in_proj_weight[-256:, :]
state_dict[f"""encoder.layers.{i}.self_attn.v_proj.bias"""] = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
in_proj_weight = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
in_proj_bias = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
state_dict[f"""decoder.layers.{i}.self_attn.q_proj.weight"""] = in_proj_weight[:256, :]
state_dict[f"""decoder.layers.{i}.self_attn.q_proj.bias"""] = in_proj_bias[:256]
state_dict[f"""decoder.layers.{i}.self_attn.k_proj.weight"""] = in_proj_weight[256:512, :]
state_dict[f"""decoder.layers.{i}.self_attn.k_proj.bias"""] = in_proj_bias[256:512]
state_dict[f"""decoder.layers.{i}.self_attn.v_proj.weight"""] = in_proj_weight[-256:, :]
state_dict[f"""decoder.layers.{i}.self_attn.v_proj.bias"""] = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
in_proj_weight_cross_attn = state_dict.pop(
f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
in_proj_bias_cross_attn = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
state_dict[f"""decoder.layers.{i}.encoder_attn.q_proj.weight"""] = in_proj_weight_cross_attn[:256, :]
state_dict[f"""decoder.layers.{i}.encoder_attn.q_proj.bias"""] = in_proj_bias_cross_attn[:256]
state_dict[f"""decoder.layers.{i}.encoder_attn.k_proj.weight"""] = in_proj_weight_cross_attn[256:512, :]
state_dict[f"""decoder.layers.{i}.encoder_attn.k_proj.bias"""] = in_proj_bias_cross_attn[256:512]
state_dict[f"""decoder.layers.{i}.encoder_attn.v_proj.weight"""] = in_proj_weight_cross_attn[-256:, :]
state_dict[f"""decoder.layers.{i}.encoder_attn.v_proj.bias"""] = in_proj_bias_cross_attn[-256:]
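# Added note: PyTorch's nn.MultiheadAttention stores the query, key and value
# projections stacked in a single (3*256, 256) in_proj matrix, so the slices
# [:256], [256:512] and [-256:] above recover q, k and v respectively.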
def resize(image , checkpoint_url ):
"""simple docstring"""
width , height = image.size
current_max_size = max(width , height )
target_max_size = 800 if '''detection''' in checkpoint_url else 1_000
scale = target_max_size / current_max_size
resized_image = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def normalize(image ):
"""simple docstring"""
image = F.to_tensor(image )
image = F.normalize(image , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]:
"""simple docstring"""
logger.info('''Converting model...''' )
# load original state dict
A__ = torch.hub.load_state_dict_from_url(lowercase_ , map_location='''cpu''' )
# rename keys
for src, dest in rename_keys:
rename_key(lowercase_ , lowercase_ , lowercase_ )
A__ = rename_backbone_keys(lowercase_ )
# query, key and value matrices need special treatment
read_in_q_k_v(lowercase_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
A__ = '''model.'''
for key in state_dict.copy().keys():
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
A__ = state_dict.pop(lowercase_ )
A__ = val
# create HuggingFace model and load state dict
A__ = TableTransformerConfig(
backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
A__ = 15
A__ = 2
A__ = {0: '''table''', 1: '''table rotated'''}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
else:
A__ = 125
A__ = 6
A__ = {
0: '''table''',
1: '''table column''',
2: '''table row''',
3: '''table column header''',
4: '''table projected row header''',
5: '''table spanning cell''',
}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
A__ = DetrImageProcessor(
format='''coco_detection''' , max_size=800 if '''detection''' in checkpoint_url else 1_000 )
A__ = TableTransformerForObjectDetection(lowercase_ )
model.load_state_dict(lowercase_ )
model.eval()
# verify our conversion
A__ = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
A__ = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=lowercase_ )
A__ = Image.open(lowercase_ ).convert('''RGB''' )
A__ = normalize(resize(lowercase_ , lowercase_ ) ).unsqueeze(0 )
A__ = model(lowercase_ )
if "detection" in checkpoint_url:
A__ = (1, 15, 3)
A__ = torch.tensor(
[[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
A__ = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
else:
A__ = (1, 125, 7)
A__ = torch.tensor(
[[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
A__ = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , lowercase_ , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , lowercase_ , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
model.save_pretrained(lowercase_ )
image_processor.save_pretrained(lowercase_ )
if push_to_hub:
# Push model to HF hub
logger.info('''Pushing model to the hub...''' )
A__ = (
'''microsoft/table-transformer-detection'''
if '''detection''' in checkpoint_url
else '''microsoft/table-transformer-structure-recognition'''
)
model.push_to_hub(lowercase_ )
image_processor.push_to_hub(lowercase_ )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 87 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester :
'''simple docstring'''
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=50 , initializer_range=0.02 , use_labels=True , scope=None , ):
'''simple docstring'''
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.use_labels = use_labels
self.scope = scope
def prepare_config_and_inputs( self):
'''simple docstring'''
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
if self.use_labels:
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
config = self.get_config()
return config, input_ids, input_mask, token_labels
def get_config( self):
'''simple docstring'''
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=False , initializer_range=self.initializer_range , )
def prepare_config_and_inputs_for_decoder( self):
'''simple docstring'''
(
config,
input_ids,
input_mask,
token_labels,
) = self.prepare_config_and_inputs()
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def create_and_check_model( self , config , input_ids , input_mask , token_labels , **kwargs , ):
'''simple docstring'''
model = BertGenerationEncoder(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids , attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_model_as_decoder( self , config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , **kwargs , ):
'''simple docstring'''
config.add_cross_attention = True
model = BertGenerationEncoder(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
result = model(
input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , **kwargs , ):
'''simple docstring'''
config.is_decoder = True
config.add_cross_attention = True
model = BertGenerationDecoder(config=config).to(torch_device).eval()
# first forward pass
outputs = model(
input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
past_key_values = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size)
next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1)
next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1)
output_from_no_past = model(
next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )['''hidden_states'''][0]
output_from_past = model(
next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )['''hidden_states'''][0]
# select random slice
random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3))
def create_and_check_for_causal_lm( self , config , input_ids , input_mask , token_labels , *args , ):
'''simple docstring'''
model = BertGenerationDecoder(config)
model.to(torch_device)
model.eval()
result = model(input_ids , attention_mask=input_mask , labels=token_labels)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[str]:
'''simple docstring'''
A__ , A__ , A__ , A__ = self.prepare_config_and_inputs()
A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        # the decoder should also work when no attention mask is passed explicitly
        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1_024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50_358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
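
# A minimal standalone inference sketch mirroring the integration tests above.
# This is an illustration, not part of the test suite; it assumes network access
# and that the checkpoint ships a compatible (sentencepiece-based) tokenizer:
#
#   from transformers import AutoTokenizer, BertGenerationEncoder
#
#   tokenizer = AutoTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
#   encoder = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
#   inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
#   last_hidden = encoder(**inputs).last_hidden_state  # shape (1, seq_len, 1024)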
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
_CITATION = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)"""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files"""
    selected_warnings = set()
    # `from_gh` is a module-level flag set in the `__main__` block below
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    # optional parameters
    parser.add_argument(
        "--targets",
        default="DeprecationWarning,UserWarning,FutureWarning",
        type=list_str,
        help="Comma-separated list of target warning(s) which we want to extract.",
    )
    parser.add_argument(
        "--from_gh",
        action="store_true",
        help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
    )

    args = parser.parse_args()

    from_gh = args.from_gh
    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)

        # get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)

        # download artifacts
        for name, url in artifacts.items():
            print(name)
            print(url)
            print("=" * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)

    # extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
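
# Example invocation (the run id and token are placeholders, and the script
# filename is an assumption; point it at wherever this file lives):
#
#   python extract_warnings.py \
#       --workflow_run_id 123456789 \
#       --output_dir ./ci_artifacts \
#       --token $GITHUB_TOKEN \
#       --targets DeprecationWarning,UserWarning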
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
MAPPING_ENCODER = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
MAPPING_ENCODER_48K = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
MAPPING_DECODER = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
MAPPING_DECODER_48K = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
MAPPING_24K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
}
MAPPING_48K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_ENCODER_48K,
    **MAPPING_DECODER,
    **MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # walk down the module tree to the submodule/parameter named by `key`
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    # every known weight type is copied into the attribute of the same name;
    # anything else (including `None`) is written to the pointer itself
    if weight_type in (
        "weight", "weight_g", "weight_v", "bias",
        "running_mean", "running_var", "num_batches_tracked",
        "weight_ih_l0", "weight_hh_l0", "bias_ih_l0", "bias_hh_l0",
        "weight_ih_l1", "weight_hh_l1", "bias_ih_l1", "bias_hh_l1",
    ):
        getattr(hf_pointer, weight_type).data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
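
# A quick illustration of the wildcard handling above (hypothetical key names,
# shown as comments so they never execute during conversion):
#   should_ignore("encoder.model.0.conv.conv", ["encoder.model.*"])                -> True   (".*" suffix: prefix match)
#   should_ignore("quantizer.vq.layers.3._codebook.embed", ["quantizer.*.embed"])  -> True   (".*." infix: prefix+suffix match)
#   should_ignore("decoder.layers.0.norm", ["encoder.*"])                          -> False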
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)

                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2_048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model",
        default="encodec_24khz",
        type=str,
        help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_checkpoint(
        args.model,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
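
# Example invocation (local paths are placeholders, and the script filename is
# an assumption; use the actual path of this file. The raw checkpoint comes
# from one of the URLs listed at the top):
#
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec_24khz_hf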
class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        # A cell may be visited if it lies inside the grid, is land, and is unvisited
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding the current cell
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make this cell visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if not visited[i][j] and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
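
# A small usage sketch: cells that touch only diagonally still belong to the
# same island under the 8-directional connectivity used above, so this grid
# contains exactly two islands.
if __name__ == "__main__":
    grid = [
        [1, 1, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 0, 1],
        [0, 0, 1, 1],
    ]
    print(Matrix(4, 4, grid).count_islands())  # expected: 2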